From 505ca9219c89db51be9d0d30b76629e8671a4400 Mon Sep 17 00:00:00 2001
From: Matthew de Detrich
Date: Tue, 26 Mar 2024 14:06:38 +0100
Subject: [PATCH] Cleanup code by applying inspections

---
 .../amqp/AmqpConnectionProvider.scala | 14 +-
 .../amqp/AmqpConnectorSettings.scala | 2 +-
 .../AbstractAmqpAsyncFlowStageLogic.scala | 10 +-
 .../amqp/impl/AmqpAsyncFlowStage.scala | 9 +-
 .../amqp/impl/AmqpReplyToSinkStage.scala | 5 +-
 .../amqp/impl/AmqpRpcFlowStage.scala | 31 +-
 .../amqp/impl/AmqpSourceStage.scala | 13 +-
 .../scala/docs/scaladsl/AmqpDocsSpec.scala | 4 +-
 .../connectors/amqp/AmqpProxyConnection.scala | 24 +-
 ...raphStageLogicConnectionShutdownSpec.scala | 2 +-
 .../avroparquet/impl/AvroParquetFlow.scala | 1 -
 .../docs/scaladsl/AvroParquetFlowSpec.scala | 2 +-
 .../docs/scaladsl/AvroParquetSinkSpec.scala | 2 +-
 .../docs/scaladsl/AvroParquetSourceSpec.scala | 2 +-
 .../EventBridgePublishSettings.scala | 2 +-
 .../EventBridgePublishMockSpec.scala | 4 +-
 .../awslambda/javadsl/AwsLambdaFlow.scala | 2 +-
 .../awslambda/scaladsl/AwsLambdaFlow.scala | 2 +-
 .../java/docs/javadsl/AwsLambdaFlowTest.java | 4 +-
 .../docs/scaladsl/AwsLambdaFlowSpec.scala | 19 +-
 .../impl/AzureQueueSourceStage.scala | 8 +-
 .../connectors/azure/storagequeue/model.scala | 2 +-
 .../azure/storagequeue/settings.scala | 4 +-
 .../scala/docs/scaladsl/AzureQueueSpec.scala | 6 +-
 ...stry-more-specific-type.backwards.excludes | 2 +
 ...stry-more-specific-type.backwards.excludes | 2 +
 .../final-class.backwards.excludes | 1 +
 .../cassandra/CassandraMetricsRegistry.scala | 2 +-
 .../cassandra/CassandraSessionSettings.scala | 2 +-
 .../cassandra/CqlSessionProvider.scala | 13 +-
 .../DriverConfigLoaderFromConfig.scala | 4 +-
 .../PekkoDiscoverySessionProvider.scala | 8 +-
 .../cassandra/javadsl/CassandraFlow.scala | 6 +-
 .../cassandra/scaladsl/CassandraFlow.scala | 9 +-
 .../cassandra/scaladsl/CassandraSession.scala | 36 +-
 .../scaladsl/CassandraSessionRegistry.scala | 7 +-
 .../java/docs/javadsl/CassandraFlowTest.java | 2 +-
 .../docs/scaladsl/CassandraFlowSpec.scala | 10 +-
 .../javadsl/CassandraSessionSpec.scala | 3 +-
 .../scaladsl/CassandraLifecycle.scala | 6 +-
 .../CassandraSessionPerformanceSpec.scala | 3 +-
 ...stry-more-specific-type.backwards.excludes | 2 +
 .../final-class.backwards.excludes | 1 +
 .../couchbase/CouchbaseSessionRegistry.scala | 7 +-
 .../impl/CouchbaseClusterRegistry.scala | 3 +-
 .../couchbase/impl/CouchbaseSessionImpl.scala | 3 +-
 .../couchbase/impl/RxUtilities.scala | 25 +-
 .../couchbase/javadsl/CouchbaseSession.scala | 2 +-
 .../stream/connectors/couchbase/model.scala | 2 +-
 .../couchbase/testing/CouchbaseSupport.scala | 2 +-
 .../connectors/csv/scaladsl/CsvBench.scala | 4 +-
 .../csv/javadsl/CsvQuotingStyle.java | 2 +-
 .../connectors/csv/impl/CsvFormatter.scala | 22 +-
 .../connectors/csv/impl/CsvParser.scala | 16 +-
 .../connectors/csv/impl/CsvParsingStage.scala | 6 +-
 .../csv/impl/CsvToMapJavaStage.scala | 25 +-
 .../connectors/csv/impl/CsvToMapStage.scala | 11 +-
 .../pekko/stream/connectors/csv/model.scala | 4 +-
 .../scala/docs/scaladsl/CsvParsingSpec.scala | 91 ++-
 .../eip/javadsl/PassThroughExamples.java | 2 +-
 .../test/java/docs/javadsl/ExampleTest.java | 2 +-
 .../scala/docs/scaladsl/ExampleSpec.scala | 2 +-
 .../ElasticsearchSourceSettings.scala | 2 +-
 .../elasticsearch/SourceSettingsBase.scala | 2 +-
 .../elasticsearch/WriteMessage.scala | 2 +-
 .../elasticsearch/WriteSettingsBase.scala | 2 +-
 .../elasticsearch/impl/ElasticsearchApi.scala | 8 +-
 .../impl/ElasticsearchSimpleFlowStage.scala | 10 +-
 .../impl/ElasticsearchSourceStage.scala | 48 +-
 .../elasticsearch/impl/RestBulkApi.scala | 8 +-
 .../elasticsearch/impl/RestBulkApiV5.scala | 5 +-
 .../elasticsearch/impl/RestBulkApiV7.scala | 5 +-
 .../javadsl/ElasticsearchSource.scala | 4 +-
 .../scaladsl/ElasticsearchFlow.scala | 24 +-
 .../scaladsl/ElasticsearchSource.scala | 6 +-
 .../ElasticsearchParameterizedTest.java | 5 +-
 .../docs/javadsl/ElasticsearchV5Test.java | 13 +-
 .../docs/javadsl/ElasticsearchV7Test.java | 18 +-
 .../javadsl/OpensearchParameterizedTest.java | 5 +-
 .../java/docs/javadsl/OpensearchV1Test.java | 18 +-
 .../ElasticsearchConnectorBehaviour.scala | 2 +-
 .../scaladsl/ElasticsearchSpecUtils.scala | 12 +-
 .../docs/scaladsl/ElasticsearchV5Spec.scala | 24 +-
 .../docs/scaladsl/ElasticsearchV7Spec.scala | 24 +-
 .../OpensearchConnectorBehaviour.scala | 2 +-
 .../docs/scaladsl/OpensearchV1Spec.scala | 24 +-
 .../connectors/file/javadsl/Directory.java | 2 +-
 .../file/javadsl/FileTailSource.java | 2 +-
 ...missing-private-methods.backwards.excludes | 11 +
 .../impl/archive/EnsureByteStreamSize.scala | 14 +-
 .../archive/FileByteStringSeparators.scala | 7 +-
 .../file/impl/archive/TarArchiveEntry.scala | 6 +-
 .../file/impl/archive/TarArchiveManager.scala | 3 +-
 .../file/impl/archive/TarReaderStage.scala | 65 +-
 .../file/impl/archive/ZipArchiveFlow.scala | 6 +-
 .../file/impl/archive/ZipReaderSource.scala | 15 +-
 .../connectors/file/javadsl/Archive.scala | 4 +-
 .../file/javadsl/LogRotatorSink.scala | 1 -
 .../pekko/stream/connectors/file/model.scala | 14 +-
 .../file/scaladsl/FileTailSource.scala | 2 +-
 .../file/scaladsl/LogRotatorSink.scala | 56 +-
 .../docs/javadsl/NestedTarReaderTest.java | 2 +-
 .../scala/docs/scaladsl/DirectorySpec.scala | 2 +-
 .../scala/docs/scaladsl/ExecutableUtils.scala | 3 +-
 .../docs/scaladsl/LogRotatorSinkSpec.scala | 18 +-
 .../scala/docs/scaladsl/TarArchiveSpec.scala | 15 +-
 .../connectors/ftp/impl/LegacyFtpsClient.java | 4 -
 .../ftp/impl/CommonFtpOperations.scala | 14 +-
 .../ftp/impl/FtpBrowserGraphStage.scala | 9 +-
 .../ftp/impl/FtpGraphStageLogic.scala | 4 +-
 .../connectors/ftp/impl/FtpIOGraphStage.scala | 20 +-
 .../connectors/ftp/impl/FtpOperations.scala | 12 +-
 .../ftp/impl/FtpSourceFactory.scala | 35 +-
 .../connectors/ftp/impl/FtpsOperations.scala | 12 +-
 .../connectors/ftp/impl/SftpOperations.scala | 11 +-
 .../connectors/ftp/javadsl/FtpApi.scala | 19 +-
 .../pekko/stream/connectors/ftp/model.scala | 16 +-
 .../java/docs/javadsl/FtpWritingTest.java | 6 +-
 .../connectors/ftp/CommonFtpStageTest.java | 1 -
 .../stream/connectors/ftp/FtpsStageTest.java | 1 -
 .../stream/connectors/ftp/BaseSpec.scala | 2 +-
 .../geode/impl/pdx/ObjectDecoder.scala | 5 +-
 .../impl/pdx/LabelledGenericGeneric.scala | 1 -
 .../connectors/geode/impl/GeodeCache.scala | 2 +-
 .../geode/impl/pdx/PdxDecoder.scala | 4 +-
 .../impl/stage/GeodeCQueryGraphLogic.scala | 20 +-
 .../stage/GeodeContinuousSourceStage.scala | 14 +-
 .../impl/stage/GeodeFiniteSourceStage.scala | 4 +-
 .../geode/impl/stage/GeodeFlowStage.scala | 14 +-
 .../impl/stage/GeodeQueryGraphLogic.scala | 3 +-
 .../impl/stage/GeodeSourceStageLogic.scala | 3 +-
 .../connectors/geode/scaladsl/Geode.scala | 8 +-
 .../javadsl/GeodeFiniteSourceTestCase.java | 8 +-
 .../scala/docs/scaladsl/GeodeBaseSpec.scala | 4 +-
 .../scaladsl/GeodeContinuousSourceSpec.scala | 4 +-
 .../src/test/scala/docs/scaladsl/Model.scala | 6 +-
 .../docs/scaladsl/PersonPdxSerializer.scala | 16 +-
 ...rExt-more-specific-type.backwards.excludes | 2 +
 .../storage/BigQueryRecordMapImpl.scala | 8 +-
 .../bigquery/storage/ProtobufConverters.scala | 6 +-
 .../bigquery/storage/impl/ArrowSource.scala | 15 +-
 .../bigquery/storage/impl/AvroSource.scala | 3 +-
 ...Settings.scala => PekkoGrpcSettings.scala} | 2 +-
 .../storage/impl/SDKClientSource.scala | 3 +-
 .../storage/scaladsl/BigQueryStorage.scala | 3 +-
 .../scaladsl/GrpcBigQueryStorageReader.scala | 2 +-
 .../storage/BigQueryStorageSpecBase.scala | 12 +-
 .../scaladsl/ArrowByteStringDecoder.scala | 8 +-
 ...mats-more-specific-type.backwards.excludes | 12 +
 ...rExt-more-specific-type.backwards.excludes | 2 +
 ...rExt-more-specific-type.backwards.excludes | 4 +
 ...rExt-more-specific-type.backwards.excludes | 4 +
 .../bigquery/BigQueryException.scala | 4 +-
 .../googlecloud/bigquery/BigQueryExt.scala | 2 +-
 .../bigquery/BigQuerySettings.scala | 10 +-
 .../bigquery/InsertAllRetryPolicy.scala | 6 +-
 .../bigquery/model/DatasetJsonProtocol.scala | 51 +-
 ...otoJsonProtocol.scala => ErrorProto.scala} | 23 +-
 .../bigquery/model/JobJsonProtocol.scala | 161 ++---
 .../bigquery/model/QueryJsonProtocol.scala | 119 ++--
 .../model/TableDataJsonProtocol.scala | 72 +--
 .../bigquery/model/TableJsonProtocol.scala | 166 ++---
 .../bigquery/scaladsl/BigQueryTableData.scala | 4 +-
 .../schema/PrimitiveSchemaWriter.scala | 3 +-
 .../scaladsl/spray/BigQueryBasicFormats.scala | 54 +-
 .../spray/BigQueryCollectionFormats.scala | 10 +-
 .../spray/BigQueryRestBasicFormats.scala | 4 +-
 .../scala/docs/scaladsl/BigQueryDoc.scala | 8 +-
 .../googlecloud/bigquery/e2e/A.scala | 7 +-
 .../e2e/scaladsl/BigQueryEndToEndSpec.scala | 2 +-
 ...rExt-more-specific-type.backwards.excludes | 2 +
 ...rExt-more-specific-type.backwards.excludes | 4 +
 ...rExt-more-specific-type.backwards.excludes | 4 +
 .../pubsub/grpc/javadsl/GrpcPublisher.scala | 2 +-
 .../pubsub/grpc/javadsl/GrpcSubscriber.scala | 2 +-
 .../pubsub/grpc/scaladsl/GooglePubSub.scala | 5 +-
 .../pubsub/grpc/scaladsl/GrpcPublisher.scala | 2 +-
 .../pubsub/grpc/scaladsl/GrpcSubscriber.scala | 2 +-
 .../test/scala/docs/scaladsl/ExampleApp.scala | 2 +-
 .../scala/docs/scaladsl/IntegrationSpec.scala | 2 +-
 .../googlecloud/pubsub/impl/PubSubApi.scala | 8 +-
 .../connectors/googlecloud/pubsub/model.scala | 20 +-
 .../pubsub/scaladsl/GooglePubSub.scala | 7 +-
 .../java/docs/javadsl/ExampleUsageJava.java | 29 +-
 .../googlecloud/pubsub/GooglePubSubSpec.scala | 3 +-
 .../pubsub/impl/PubSubApiSpec.scala | 8 +-
 ...SExt-more-specific-type.backwards.excludes | 2 +
 ...eExt-more-specific-type.backwards.excludes | 2 +
 .../googlecloud/storage/Bucket.scala | 2 +-
 .../googlecloud/storage/FailedUpload.scala | 4 +-
 .../googlecloud/storage/GCSAttributes.scala | 6 +-
 .../googlecloud/storage/GCSExt.scala | 2 +-
 .../storage/GCStorageAttributes.scala | 6 +-
 .../googlecloud/storage/GCStorageExt.scala | 2 +-
 .../storage/GCStorageSettings.scala | 6 +-
 .../googlecloud/storage/StorageObject.scala | 6 +-
 .../storage/impl/GCStorageStream.scala | 2 +-
 .../storage/javadsl/GCStorage.scala | 4 +-
 .../googlecloud/storage/settings.scala | 2 +-
 .../scaladsl/GCStorageWiremockBase.scala | 3 +-
 ...Type-more-specific-type.backwards.excludes | 1 +
 .../stream/connectors/google/GoogleExt.scala | 4 +-
 .../connectors/google/GoogleSettings.scala | 98 +--
 .../connectors/google/PaginatedRequest.scala | 3 +-
 .../connectors/google/ResumableUpload.scala | 4 +-
 .../auth/ComputeEngineCredentials.scala | 3 +-
 .../connectors/google/auth/Credentials.scala | 2 +-
 .../google/auth/GoogleOAuth2Credentials.scala | 3 +-
 .../google/auth/OAuth2Credentials.scala | 4 +-
 .../auth/ServiceAccountCredentials.scala | 7 +-
 .../google/auth/UserAccessCredentials.scala | 10 +-
 .../stream/connectors/google/implicits.scala | 2 +-
 .../scaladsl/`X-Upload-Content-Type`.scala | 2 +-
 .../stream/connectors/google/util/Retry.scala | 2 +-
 .../google/auth/OAuth2CredentialsSpec.scala | 2 +-
 .../google/http/GoogleHttpSpec.scala | 3 +-
 .../final-class.backwards.excludes | 32 +
 .../firebase/fcm/FcmNotificationModels.scala | 38 +-
 .../google/firebase/fcm/FcmSettings.scala | 32 +-
 .../firebase/fcm/impl/FcmJsonSupport.scala | 6 +-
 .../google/firebase/fcm/impl/FcmSender.scala | 5 +-
 .../firebase/fcm/v1/impl/FcmJsonSupport.scala | 6 +-
 .../firebase/fcm/v1/impl/FcmSender.scala | 5 +-
 .../fcm/v1/models/AndroidConfig.scala | 8 +-
 .../firebase/fcm/v1/models/ApnsConfig.scala | 2 +-
 .../fcm/v1/models/BasicNotification.scala | 2 +-
 .../fcm/v1/models/FcmNotification.scala | 2 +-
 .../firebase/fcm/v1/models/FcmOption.scala | 8 +-
 .../fcm/v1/models/NotificationTarget.scala | 20 +-
 .../fcm/v1/models/WebPushConfig.scala | 2 +-
 .../scala/docs/scaladsl/FcmExamples.scala | 2 +-
 .../connectors/hbase/HTableSettings.scala | 2 +-
 .../hbase/impl/HBaseCapabilities.scala | 7 +-
 .../hbase/impl/HBaseFlowStage.scala | 10 +-
 .../hbase/impl/HBaseSourceStage.scala | 7 +-
 .../java/docs/javadsl/HBaseStageTest.java | 50 +-
 .../scala/docs/scaladsl/HBaseStageSpec.scala | 8 +-
 .../connectors/hdfs/impl/HdfsFlowStage.scala | 11 +-
 .../impl/writer/CompressedDataWriter.scala | 3 +-
 .../connectors/hdfs/javadsl/HdfsSource.scala | 10 +-
 .../pekko/stream/connectors/hdfs/model.scala | 12 +-
 .../scala/docs/scaladsl/HdfsWriterSpec.scala | 8 +-
 .../final-class.backwards.excludes | 20 +
 .../pushkit/ForwardProxyHttpsContext.scala | 7 +-
 .../pushkit/ForwardProxyPoolSettings.scala | 2 +-
 .../huawei/pushkit/HmsSettingExt.scala | 2 +-
 .../huawei/pushkit/HmsSettings.scala | 52 +-
 .../huawei/pushkit/impl/HmsSession.scala | 7 +-
 .../huawei/pushkit/impl/HmsTokenApi.scala | 4 +-
 .../pushkit/impl/PushKitJsonSupport.scala | 4 +-
 .../huawei/pushkit/impl/PushKitSender.scala | 5 +-
 .../huawei/pushkit/models/AndroidConfig.scala | 15 +-
 .../huawei/pushkit/models/ApnsConfig.scala | 2 +-
 .../pushkit/models/BasicNotification.scala | 3 +-
 .../pushkit/models/NotificationTarget.scala | 20 +-
 .../pushkit/models/PushKitNotification.scala | 2 +-
 .../huawei/pushkit/models/WebConfig.scala | 6 +-
 .../huawei/pushkit/impl/HmsTokenApiSpec.scala | 2 +-
 .../pushkit/impl/PushKitSenderSpec.scala | 2 +-
 .../influxdb/impl/InfluxDbFlowStage.scala | 25 +-
 .../influxdb/impl/InfluxDbSourceStage.scala | 23 +-
 .../PekkoConnectorsResultMapperHelper.scala | 100 ++-
 .../test/java/docs/javadsl/InfluxDbTest.java | 18 +-
 .../src/test/java/docs/javadsl/TestUtils.java | 4 +-
 .../test/scala/docs/scaladsl/FlowSpec.scala | 4 +-
 .../docs/scaladsl/InfluxDbSourceSpec.scala | 6 +-
 .../scala/docs/scaladsl/InfluxDbSpec.scala | 14 +-
 .../final-class.backwards.excludes | 4 +
 .../connectors/ironmq/IronMqSettings.scala | 4 +-
 .../stream/connectors/ironmq/domain.scala | 8 +-
 .../stream/connectors/ironmq/impl/Codec.scala | 4 +-
 .../connectors/ironmq/impl/IronMqClient.scala | 35 +-
 .../ironmq/impl/IronMqPullStage.scala | 3 +-
 .../ironmq/impl/IronMqPushStage.scala | 5 +-
 .../ironmq/impl/ReservedMessage.scala | 6 +-
 .../ironmq/javadsl/IronMqProducer.scala | 2 +-
 .../connectors/ironmq/javadsl/package.scala | 8 +-
 .../final-class.backwards.excludes | 12 +
 .../stream/connectors/jms/Credentials.scala | 2 +-
 .../stream/connectors/jms/Envelopes.scala | 6 +-
 .../pekko/stream/connectors/jms/Headers.scala | 22 +-
 .../connectors/jms/JmsBrowseSettings.scala | 2 +-
 .../connectors/jms/JmsConsumerSettings.scala | 2 +-
 .../stream/connectors/jms/JmsExceptions.scala | 15 +-
 .../stream/connectors/jms/JmsMessages.scala | 6 +-
 .../connectors/jms/JmsProducerSettings.scala | 2 +-
 .../jms/impl/InternalConnectionState.scala | 8 +-
 .../jms/impl/JmsAckSourceStage.scala | 3 +-
 .../connectors/jms/impl/JmsBrowseStage.scala | 9 +-
 .../connectors/jms/impl/JmsConnector.scala | 32 +-
 .../jms/impl/JmsConsumerStage.scala | 8 +-
 .../jms/impl/JmsMessageProducer.scala | 5 +-
 .../jms/impl/JmsProducerStage.scala | 18 +-
 .../jms/impl/JmsTxSourceStage.scala | 47 +-
 .../stream/connectors/jms/impl/Sessions.scala | 5 +-
 .../jms/impl/SourceStageLogic.scala | 13 +-
 .../jms/scaladsl/JmsConnectorState.scala | 6 +-
 .../docs/scaladsl/JmsConnectorsSpec.scala | 3 +-
 .../docs/scaladsl/JmsTxConnectorsSpec.scala | 14 +-
 .../connectors/jms/JmsProducerRetrySpec.scala | 6 +-
 .../connectors/jms/JmsSharedServerSpec.scala | 6 +-
 .../pekko/stream/connectors/jms/JmsSpec.scala | 3 +-
 .../jms/impl/SoftReferenceCacheSpec.scala | 5 +-
 .../scaladsl/CachedConnectionFactory.scala | 5 +-
 .../json/impl/JsonStreamReader.scala | 8 +-
 .../final-class.backwards.excludes | 5 +
 .../kinesis/CommittableRecord.scala | 3 +-
 .../connectors/kinesis/KinesisErrors.scala | 2 +-
 .../kinesis/KinesisFlowSettings.scala | 4 +-
 .../connectors/kinesis/ShardIterator.scala | 6 +-
 .../connectors/kinesis/ShardSettings.scala | 4 +-
 .../impl/KinesisSchedulerSourceStage.scala | 16 +-
 .../kinesis/impl/KinesisSourceStage.scala | 10 +-
 .../kinesis/impl/ShardProcessor.scala | 5 +-
 .../KinesisFirehoseErrors.scala | 2 +-
 .../KinesisFirehoseFlowSettings.scala | 2 +-
 .../scaladsl/KinesisFirehoseFlow.scala | 2 +-
 .../scaladsl/KinesisFirehoseSnippets.scala | 3 +-
 .../scala/docs/scaladsl/KinesisSnippets.scala | 3 +-
 .../connectors/kinesis/KinesisFlowSpec.scala | 21 +-
 .../kinesis/KinesisSourceSpec.scala | 29 +-
 .../stream/connectors/kinesis/Valve.scala | 17 +-
 .../KinesisFirehoseFlowSpec.scala | 18 +-
 ...tExt-more-specific-type.backwards.excludes | 2 +
 .../connectors/kudu/KuduClientExt.scala | 4 +-
 .../connectors/kudu/impl/KuduFlowStage.scala | 14 +-
 .../connectors/mongodb/DocumentUpdate.scala | 4 +-
 .../java/docs/javadsl/MongoSourceTest.java | 13 +-
 .../mqtt/streaming/impl/BehaviorRunner.scala | 10 +-
 .../mqtt/streaming/impl/ClientState.scala | 39 +-
 .../mqtt/streaming/impl/MqttFrameStage.scala | 8 +-
 .../mqtt/streaming/impl/QueueOfferState.scala | 5 +-
 .../mqtt/streaming/impl/RequestState.scala | 217 +++---
 .../mqtt/streaming/impl/ServerState.scala | 578 +++++++++---------
 .../connectors/mqtt/streaming/model.scala | 111 ++--
 .../mqtt/streaming/scaladsl/Mqtt.scala | 8 +-
 .../scala/docs/scaladsl/MqttSessionSpec.scala | 4 +-
 .../streaming/impl/QueueOfferStateSpec.scala | 2 +-
 .../connectors/mqtt/impl/MqttFlowStage.scala | 23 +-
 .../stream/connectors/mqtt/settings.scala | 2 +-
 .../scala/docs/scaladsl/MqttSpecBase.scala | 2 +-
 .../orientdb/OrientDbSourceSettings.scala | 2 +-
 .../orientdb/OrientDbWriteSettings.scala | 2 +-
 .../orientdb/impl/OrientDbFlowStage.scala | 10 +-
 .../orientdb/impl/OrientDbSourceStage.scala | 4 +-
 .../stream/connectors/orientdb/model.scala | 2 +-
 .../test/java/docs/javadsl/OrientDbTest.java | 9 +-
 .../scala/docs/scaladsl/OrientDbSpec.scala | 12 +-
 .../pravega/javadsl/PravegaTable.java | 8 +-
 .../connectors/pravega/PravegaSettings.scala | 21 +-
 .../pravega/impl/PravegaCapabilities.scala | 6 +-
 .../connectors/pravega/impl/PravegaFlow.scala | 6 +-
 .../pravega/impl/PravegaSource.scala | 6 +-
 .../pravega/impl/PravegaTableReadFlow.scala | 5 +-
 .../pravega/impl/PravegaTableSource.scala | 46 +-
 .../pravega/impl/PravegaTableWriteFlow.scala | 5 +-
 .../pravega/impl/PravegaWriter.scala | 3 +-
 .../docs/javadsl/PravegaReadWriteDocs.java | 2 +-
 .../pravega/PravegaGraphTestCase.java | 2 +-
 .../pravega/PravegaKVTableTestCase.java | 5 +-
 .../src/test/scala/docs/scaladsl/Model.scala | 2 +-
 .../connectors/pravega/PravegaBaseSpec.scala | 7 +-
 .../pravega/PravegaKVTableSpec.scala | 2 +-
 project/Common.scala | 5 +-
 .../connectors/reference/Resource.scala | 13 +-
 .../connectors/reference/attributes.scala | 2 +-
 .../reference/impl/ReferenceFlowStage.scala | 2 +-
 .../test/java/docs/javadsl/ReferenceTest.java | 2 +-
 .../scala/docs/scaladsl/ReferenceSpec.scala | 2 +-
 .../stream/connectors/s3/S3Attributes.scala | 4 +-
 .../stream/connectors/s3/S3Exception.scala | 2 +-
 .../pekko/stream/connectors/s3/S3Ext.scala | 2 +-
 .../stream/connectors/s3/S3Headers.scala | 2 +-
 .../s3/headers/ServerSideEncryption.scala | 6 +-
 .../connectors/s3/headers/StorageClass.scala | 8 +-
 .../connectors/s3/impl/DiskBuffer.scala | 14 +-
 .../connectors/s3/impl/HttpRequests.scala | 3 +-
 .../connectors/s3/impl/Marshalling.scala | 4 +-
 .../connectors/s3/impl/MemoryBuffer.scala | 10 +-
 .../s3/impl/MemoryWithContext.scala | 10 +-
 .../stream/connectors/s3/impl/S3Stream.scala | 80 ++-
 .../connectors/s3/impl/SplitAfterSize.scala | 10 +-
 .../s3/impl/SplitAfterSizeWithContext.scala | 6 +-
 .../s3/impl/auth/CanonicalRequest.scala | 12 +-
 .../connectors/s3/impl/auth/SigningKey.scala | 10 +-
 .../pekko/stream/connectors/s3/model.scala | 12 +-
 .../pekko/stream/connectors/s3/settings.scala | 28 +-
 s3/src/test/java/docs/javadsl/S3Test.java | 8 +-
 .../stream/connectors/s3/MinioContainer.scala | 2 +-
 .../connectors/s3/impl/DiskBufferSpec.scala | 10 +-
 .../s3/impl/SplitAfterSizeSpec.scala | 10 +-
 .../auth/{authSpec.scala => AuthSpec.scala} | 2 +-
 .../s3/scaladsl/S3IntegrationSpec.scala | 11 +-
 scripts/authors.scala | 2 +-
 .../recordio/impl/RecordIOFramingStage.scala | 14 +-
 .../connectors/slick/javadsl/package.scala | 34 +-
 .../DocSnippetFlowWithPassThrough.java | 4 +-
 .../java/docs/javadsl/DocSnippetSink.java | 4 +-
 .../java/docs/javadsl/DocSnippetSource.java | 4 +-
 slick/src/test/java/docs/javadsl/User.java | 9 +-
 .../scala/docs/scaladsl/DocSnippets.scala | 32 +-
 .../test/scala/docs/scaladsl/SlickSpec.scala | 22 +-
 .../java/docs/javadsl/SnsPublisherTest.java | 2 +-
 .../sns/SnsPublishMockingSpec.scala | 2 +-
 .../stream/connectors/solr/SolrMessages.scala | 2 +-
 .../connectors/solr/SolrUpdateSettings.scala | 4 +-
 .../connectors/solr/impl/SolrFlowStage.scala | 19 +-
 .../solr/impl/SolrSourceStage.scala | 7 +-
 .../test/scala/docs/scaladsl/SolrSpec.scala | 24 +-
 .../stream/connectors/sqs/SqsModel.scala | 4 +-
 .../sqs/SqsPublishGroupedSettings.scala | 2 +-
 .../connectors/sqs/SqsSourceSettings.scala | 16 +-
 .../sqs/impl/BalancingMapAsync.scala | 12 +-
 .../connectors/sqs/scaladsl/SqsSource.scala | 5 +-
 .../sqs/scaladsl/SqsSourceMockSpec.scala | 5 +-
 .../java/docs/javadsl/EventSourceTest.java | 3 +-
 .../scala/docs/scaladsl/EventSourceSpec.scala | 8 +-
 .../testkit/CapturingAppender.scala | 7 +-
 .../connectors/testkit/LogbackUtil.scala | 4 +-
 .../testkit/javadsl/LogCapturingJunit4.scala | 5 +-
 .../connectors/text/impl/CharsetLogic.scala | 14 +-
 .../docs/javadsl/CharsetCodingFlowsDoc.java | 3 -
 .../stream/connectors/udp/impl/UdpBind.scala | 15 +-
 .../stream/connectors/udp/impl/UdpSend.scala | 7 +-
 .../pekko/stream/connectors/udp/model.scala | 8 +-
 udp/src/test/java/docs/javadsl/UdpTest.java | 2 +-
 .../test/scala/docs/scaladsl/UdpSpec.scala | 2 +-
 ...cket-more-specific-type.backwards.excludes | 4 +
 .../impl/UnixDomainSocketImpl.scala | 24 +-
 .../javadsl/UnixDomainSocket.scala | 5 +-
 .../scaladsl/UnixDomainSocket.scala | 5 +-
 .../stream/connectors/xml/impl/Coalesce.scala | 3 +-
 .../xml/impl/StreamingXmlParser.scala | 9 +-
 .../xml/impl/StreamingXmlWriter.scala | 31 +-
 .../stream/connectors/xml/impl/Subslice.scala | 23 +-
 .../stream/connectors/xml/impl/Subtree.scala | 19 +-
 .../connectors/xml/javadsl/XmlParsing.scala | 4 +-
 .../pekko/stream/connectors/xml/model.scala | 16 +-
 .../connectors/xml/scaladsl/XmlParsing.scala | 3 +-
 .../java/docs/javadsl/XmlParsingTest.java | 34 +-
 440 files changed, 2614 insertions(+), 2949 deletions(-)
 create mode 100644 cassandra/src/main/mima-filters/1.1.x.backwards.excludes/CassandraMetricsRegistry-more-specific-type.backwards.excludes
 create mode 100644 cassandra/src/main/mima-filters/1.1.x.backwards.excludes/CassandraSessionRegistry-more-specific-type.backwards.excludes
 create mode 100644 cassandra/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes
 create mode 100644 couchbase/src/main/mima-filters/1.1.x.backwards.excludes/CouchbaseSessionRegistry-more-specific-type.backwards.excludes
 create mode 100644 couchbase/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes
 create mode 100644 file/src/main/mima-filters/1.1.x.backwards.excludes/LogRotatorSink-missing-private-methods.backwards.excludes
 create mode 100644 google-cloud-bigquery-storage/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes
 rename google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/{AkkaGrpcSettings.scala => PekkoGrpcSettings.scala} (97%)
 create mode 100644 google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/BigQueryBasicFormats-more-specific-type.backwards.excludes
 create mode 100644 google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes
 create mode 100644 google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcPublisherExt-more-specific-type.backwards.excludes
 create mode 100644 google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcSubscriberExt-more-specific-type.backwards.excludes
 rename google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/{ErrorProtoJsonProtocol.scala => ErrorProto.scala} (76%)
 create mode 100644 google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes
 create mode 100644 google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcPublisherExt-more-specific-type.backwards.excludes
 create mode 100644 google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcSubscriberExt-more-specific-type.backwards.excludes
 create mode 100644 google-cloud-storage/src/main/mima-filters/1.1.x.backwards.excludes/GCSExt-more-specific-type.backwards.excludes
 create mode 100644 google-cloud-storage/src/main/mima-filters/1.1.x.backwards.excludes/GCSStorageExt-more-specific-type.backwards.excludes
 create mode 100644 google-common/src/main/mima-filters/1.1.x.backwards.excludes/XUploadContentType-more-specific-type.backwards.excludes
 create mode 100644
google-fcm/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes create mode 100644 huawei-push-kit/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes create mode 100644 ironmq/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes create mode 100644 jms/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes create mode 100644 kinesis/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes create mode 100644 kudu/src/main/mima-filters/1.1.x.backwards.excludes/KuduClientExt-more-specific-type.backwards.excludes rename s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/{authSpec.scala => AuthSpec.scala} (94%) create mode 100644 unix-domain-socket/src/main/mima-filters/1.1.x.backwards.excludes/UnixDomainSocket-more-specific-type.backwards.excludes diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectionProvider.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectionProvider.scala index c6a3ca303..597bb7e9f 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectionProvider.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectionProvider.scala @@ -143,11 +143,10 @@ final class AmqpDetailsConnectionProvider private ( if (sslConfiguration.trustManager.isDefined) factory.useSslProtocol(sslConfiguration.protocol.get, sslConfiguration.trustManager.get) else factory.useSslProtocol(sslConfiguration.protocol.get) - } else if (sslConfiguration.context.isDefined) { + } else if (sslConfiguration.context.isDefined) factory.useSslProtocol(sslConfiguration.context.get) - } else { + else factory.useSslProtocol() - } }) requestedHeartbeat.foreach(factory.setRequestedHeartbeat) connectionTimeout.foreach(factory.setConnectionTimeout) @@ -244,9 +243,8 @@ object AmqpCredentials { final class AmqpSSLConfiguration private (val protocol: Option[String] = None, val trustManager: Option[TrustManager] = None, val context: Option[SSLContext] = None) { - if (protocol.isDefined && context.isDefined) { + if (protocol.isDefined && context.isDefined) throw new IllegalArgumentException("Protocol and context can't be defined in the same AmqpSSLConfiguration.") - } def withProtocol(protocol: String): AmqpSSLConfiguration = copy(protocol = Some(protocol)) @@ -419,10 +417,8 @@ final class AmqpCachedConnectionProvider private (val provider: AmqpConnectionPr throw new ConcurrentModificationException( "Unexpected concurrent modification while closing the connection.") } - } else { - if (!state.compareAndSet(c, Connected(cachedConnection, clients - 1))) - releaseRecursive(amqpConnectionProvider, connection) - } + } else if (!state.compareAndSet(c, Connected(cachedConnection, clients - 1))) + releaseRecursive(amqpConnectionProvider, connection) case Closing => releaseRecursive(amqpConnectionProvider, connection) } diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectorSettings.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectorSettings.scala index 5ffc9e74c..1fd756b47 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectorSettings.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectorSettings.scala @@ -152,7 +152,7 @@ final class TemporaryQueueSourceSettings private ( } object TemporaryQueueSourceSettings { - def apply(connectionProvider: AmqpConnectionProvider, 
exchange: String) = + def apply(connectionProvider: AmqpConnectionProvider, exchange: String): TemporaryQueueSourceSettings = new TemporaryQueueSourceSettings(connectionProvider, exchange) /** diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AbstractAmqpAsyncFlowStageLogic.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AbstractAmqpAsyncFlowStageLogic.scala index c8d5ae721..5eb46af10 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AbstractAmqpAsyncFlowStageLogic.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AbstractAmqpAsyncFlowStageLogic.scala @@ -72,9 +72,7 @@ import scala.concurrent.Promise val callback = getAsyncCallback[(DeliveryTag, Boolean)] { case (tag: DeliveryTag, multiple: Boolean) => confirmCallback(tag, multiple) } - new ConfirmCallback { // cant use function literal because it doesn't work with 2.11 - override def handle(tag: DeliveryTag, multiple: Boolean): Unit = callback.invoke((tag, multiple)) - } + (tag: DeliveryTag, multiple: Boolean) => callback.invoke((tag, multiple)) } private def onConfirmation(tag: DeliveryTag, multiple: Boolean): Unit = { @@ -155,9 +153,8 @@ import scala.concurrent.Promise if (noAwaitingMessages && exitQueue.isEmpty) { streamCompletion.success(Done) super.onUpstreamFinish() - } else { + } else log.debug("Received upstream finish signal - stage will be closed when all buffered messages are processed") - } private def publish(message: WriteMessage): DeliveryTag = { val tag: DeliveryTag = channel.getNextPublishSeqNo @@ -193,10 +190,9 @@ import scala.concurrent.Promise override protected def onTimer(timerKey: Any): Unit = timerKey match { - case tag: DeliveryTag => { + case tag: DeliveryTag => log.debug("Received timeout for deliveryTag {}.", tag) onRejection(tag, multiple = false) - } case _ => () } diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpAsyncFlowStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpAsyncFlowStage.scala index 3e4c41888..f338246f4 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpAsyncFlowStage.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpAsyncFlowStage.scala @@ -58,15 +58,14 @@ import scala.concurrent.{ Future, Promise } buffer += (tag -> AwaitingMessage(tag, passThrough)) override def dequeueAwaitingMessages(tag: DeliveryTag, multiple: Boolean): Iterable[AwaitingMessage[T]] = - if (multiple) { + if (multiple) dequeueWhile((t, _) => t <= tag) - } else { + else { setReady(tag) - if (isAtHead(tag)) { + if (isAtHead(tag)) dequeueWhile((_, message) => message.ready) - } else { + else Seq.empty - } } private def dequeueWhile( diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala index e012e5e1d..643179f40 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala @@ -31,7 +31,7 @@ import scala.concurrent.{ Future, Promise } private[amqp] final class AmqpReplyToSinkStage(replyToSinkSettings: AmqpReplyToSinkSettings) extends GraphStageWithMaterializedValue[SinkShape[WriteMessage], Future[Done]] { stage => - val in = Inlet[WriteMessage]("AmqpReplyToSink.in") + val in: Inlet[WriteMessage] = 
Inlet[WriteMessage]("AmqpReplyToSink.in") override def shape: SinkShape[WriteMessage] = SinkShape.of(in) @@ -82,9 +82,8 @@ private[amqp] final class AmqpReplyToSinkStage(replyToSinkSettings: AmqpReplyToS elem.immediate, elem.properties.orNull, elem.bytes.toArray) - } else if (settings.failIfReplyToMissing) { + } else if (settings.failIfReplyToMissing) onFailure(new RuntimeException("Reply-to header was not set")) - } tryPull(in) } diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala index 289ff27eb..dddac3fd3 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala @@ -42,8 +42,8 @@ private[amqp] final class AmqpRpcFlowStage(writeSettings: AmqpWriteSettings, buf extends GraphStageWithMaterializedValue[FlowShape[WriteMessage, CommittableReadResult], Future[String]] { stage => - val in = Inlet[WriteMessage]("AmqpRpcFlow.in") - val out = Outlet[CommittableReadResult]("AmqpRpcFlow.out") + val in: Inlet[WriteMessage] = Inlet[WriteMessage]("AmqpRpcFlow.in") + val out: Outlet[CommittableReadResult] = Outlet[CommittableReadResult]("AmqpRpcFlow.out") override def shape: FlowShape[WriteMessage, CommittableReadResult] = FlowShape.of(in, out) @@ -70,7 +70,7 @@ private[amqp] final class AmqpRpcFlowStage(writeSettings: AmqpWriteSettings, buf val consumerCallback = getAsyncCallback(handleDelivery) val commitCallback = getAsyncCallback[AckArguments] { - case AckArguments(deliveryTag, multiple, promise) => { + case AckArguments(deliveryTag, multiple, promise) => try { channel.basicAck(deliveryTag, multiple) unackedMessages -= 1 @@ -81,10 +81,9 @@ private[amqp] final class AmqpRpcFlowStage(writeSettings: AmqpWriteSettings, buf } catch { case e: Throwable => promise.failure(e) } - } } val nackCallback = getAsyncCallback[NackArguments] { - case NackArguments(deliveryTag, multiple, requeue, promise) => { + case NackArguments(deliveryTag, multiple, requeue, promise) => try { channel.basicNack(deliveryTag, multiple, requeue) unackedMessages -= 1 @@ -95,7 +94,6 @@ private[amqp] final class AmqpRpcFlowStage(writeSettings: AmqpWriteSettings, buf } catch { case e: Throwable => promise.failure(e) } - } } val amqpSourceConsumer = new DefaultConsumer(channel) { @@ -105,7 +103,7 @@ private[amqp] final class AmqpRpcFlowStage(writeSettings: AmqpWriteSettings, buf body: Array[Byte]): Unit = consumerCallback.invoke( new CommittableReadResult { - override val message = ReadResult(ByteString(body), envelope, properties) + override val message: ReadResult = ReadResult(ByteString(body), envelope, properties) override def ack(multiple: Boolean): Future[Done] = { val promise = Promise[Done]() @@ -148,21 +146,19 @@ private[amqp] final class AmqpRpcFlowStage(writeSettings: AmqpWriteSettings, buf } def handleDelivery(message: CommittableReadResult): Unit = - if (isAvailable(out)) { + if (isAvailable(out)) pushMessage(message) - } else if (queue.size + 1 > bufferSize) { + else if (queue.size + 1 > bufferSize) onFailure(new RuntimeException(s"Reached maximum buffer size $bufferSize")) - } else { + else queue.enqueue(message) - } setHandler( out, new OutHandler { override def onPull(): Unit = - if (queue.nonEmpty) { + if (queue.nonEmpty) pushMessage(queue.dequeue()) - } override def onDownstreamFinish(cause: Throwable): Unit = { setKeepGoing(true) @@ -207,15 
+203,14 @@ private[amqp] final class AmqpRpcFlowStage(writeSettings: AmqpWriteSettings, buf val expectedResponses: Int = { val headers = props.getHeaders - if (headers == null) { + if (headers == null) responsesPerMessage - } else { + else { val r = headers.get("expectedReplies") - if (r != null) { + if (r != null) r.asInstanceOf[Int] - } else { + else responsesPerMessage - } } } diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSourceStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSourceStage.scala index 2e950ef95..c33ec75f0 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSourceStage.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSourceStage.scala @@ -94,9 +94,8 @@ private[amqp] final class AmqpSourceStage(settings: AmqpSourceSettings, bufferSi properties: BasicProperties, body: Array[Byte]): Unit = { val message = if (ackRequired) { - new CommittableReadResult { - override val message = ReadResult(ByteString(body), envelope, properties) + override val message: ReadResult = ReadResult(ByteString(body), envelope, properties) override def ack(multiple: Boolean): Future[Done] = { val promise = Promise[Done]() @@ -155,21 +154,19 @@ private[amqp] final class AmqpSourceStage(settings: AmqpSourceSettings, bufferSi } def handleDelivery(message: CommittableReadResult): Unit = - if (isAvailable(out)) { + if (isAvailable(out)) pushMessage(message) - } else if (queue.size + 1 > bufferSize) { + else if (queue.size + 1 > bufferSize) onFailure(new RuntimeException(s"Reached maximum buffer size $bufferSize")) - } else { + else queue.enqueue(message) - } setHandler( out, new OutHandler { override def onPull(): Unit = - if (queue.nonEmpty) { + if (queue.nonEmpty) pushMessage(queue.dequeue()) - } override def onDownstreamFinish(cause: Throwable): Unit = if (unackedMessages == 0) super.onDownstreamFinish(cause) diff --git a/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala b/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala index dab24b6a3..6d38e0abe 100644 --- a/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala +++ b/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala @@ -35,7 +35,7 @@ class AmqpDocsSpec extends AmqpSpec { override implicit val patienceConfig: PatienceConfig = PatienceConfig(10.seconds) - val businessLogic: CommittableReadResult => Future[CommittableReadResult] = Future.successful(_) + val businessLogic: CommittableReadResult => Future[CommittableReadResult] = Future.successful "The AMQP Connectors" should { @@ -158,7 +158,7 @@ class AmqpDocsSpec extends AmqpSpec { val mergingFlow = mergedSources .viaMat(KillSwitches.single)(Keep.right) .to(Sink.fold(Set.empty[Int]) { - case (seen, (branch, element)) => + case (seen, (branch, _)) => if (seen.size == fanoutSize) completion.trySuccess(Done) seen + branch }) diff --git a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpProxyConnection.scala b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpProxyConnection.scala index e9ccb9567..bdb5e20ae 100644 --- a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpProxyConnection.scala +++ b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpProxyConnection.scala @@ -29,21 +29,21 @@ import com.rabbitmq.client._ * otherwise undefined */ class AmqpProxyConnection(protected val delegate: Connection) extends Connection { - override def getAddress: InetAddress = delegate.getAddress() + override def getAddress: 
InetAddress = delegate.getAddress - override def getPort: Int = delegate.getPort() + override def getPort: Int = delegate.getPort - override def getChannelMax: Int = delegate.getChannelMax() + override def getChannelMax: Int = delegate.getChannelMax - override def getFrameMax: Int = delegate.getFrameMax() + override def getFrameMax: Int = delegate.getFrameMax - override def getHeartbeat: Int = delegate.getHeartbeat() + override def getHeartbeat: Int = delegate.getHeartbeat - override def getClientProperties: util.Map[String, AnyRef] = delegate.getClientProperties() + override def getClientProperties: util.Map[String, AnyRef] = delegate.getClientProperties - override def getClientProvidedName: String = delegate.getClientProvidedName() + override def getClientProvidedName: String = delegate.getClientProvidedName - override def getServerProperties: util.Map[String, AnyRef] = delegate.getServerProperties() + override def getServerProperties: util.Map[String, AnyRef] = delegate.getServerProperties override def createChannel(): Channel = delegate.createChannel() @@ -76,9 +76,9 @@ class AmqpProxyConnection(protected val delegate: Connection) extends Connection override def clearBlockedListeners(): Unit = delegate.clearBlockedListeners() - override def getExceptionHandler: ExceptionHandler = delegate.getExceptionHandler() + override def getExceptionHandler: ExceptionHandler = delegate.getExceptionHandler - override def getId: String = delegate.getId() + override def getId: String = delegate.getId override def setId(s: String): Unit = delegate.setId(s) @@ -88,9 +88,9 @@ class AmqpProxyConnection(protected val delegate: Connection) extends Connection override def removeShutdownListener(shutdownListener: ShutdownListener): Unit = delegate.removeShutdownListener(shutdownListener) - override def getCloseReason: ShutdownSignalException = delegate.getCloseReason() + override def getCloseReason: ShutdownSignalException = delegate.getCloseReason override def notifyListeners(): Unit = delegate.notifyListeners() - override def isOpen: Boolean = delegate.isOpen() + override def isOpen: Boolean = delegate.isOpen } diff --git a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala index 23be429ca..a28a6f417 100644 --- a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala +++ b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala @@ -55,7 +55,7 @@ class AmqpGraphStageLogicConnectionShutdownSpec val shutdownsAdded = new AtomicInteger() val shutdownsRemoved = new AtomicInteger() - override def beforeEach() = { + override def beforeEach(): Unit = { shutdownsAdded.set(0) shutdownsRemoved.set(0) } diff --git a/avroparquet/src/main/scala/org/apache/pekko/stream/connectors/avroparquet/impl/AvroParquetFlow.scala b/avroparquet/src/main/scala/org/apache/pekko/stream/connectors/avroparquet/impl/AvroParquetFlow.scala index 8b6cc6e28..2be8cf9a7 100644 --- a/avroparquet/src/main/scala/org/apache/pekko/stream/connectors/avroparquet/impl/AvroParquetFlow.scala +++ b/avroparquet/src/main/scala/org/apache/pekko/stream/connectors/avroparquet/impl/AvroParquetFlow.scala @@ -41,7 +41,6 @@ private[avroparquet] class AvroParquetFlow[T <: GenericRecord](writer: ParquetWr new InHandler { override def onUpstreamFinish(): Unit = - // 
super.onUpstreamFinish() completeStage() override def onUpstreamFailure(ex: Throwable): Unit = { diff --git a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala index c52de6acc..bfd7e4019 100644 --- a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala +++ b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala @@ -71,7 +71,7 @@ class AvroParquetFlowSpec val n: Int = 2 val file: String = genFinalFile.sample.get val documents: List[Document] = genDocuments(n).sample.get - val avroDocuments: List[Record] = documents.map(format.to(_)) + val avroDocuments: List[Record] = documents.map(format.to) val writer: ParquetWriter[Record] = parquetWriter[Record](file, conf, schema) // when diff --git a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala index 18b414e94..4ad331103 100644 --- a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala +++ b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala @@ -63,7 +63,7 @@ class AvroParquetSinkSpec val documents: List[Document] = genDocuments(n).sample.get val writer: ParquetWriter[Record] = parquetWriter[Record](file, conf, schema) // #init-sink - val records: List[Record] = documents.map(format.to(_)) + val records: List[Record] = documents.map(format.to) val source: Source[Record, NotUsed] = Source(records) val result: Future[Done] = source .runWith(AvroParquetSink(writer)) diff --git a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala index 2fa88db1e..2f1bf9a0e 100644 --- a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala +++ b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala @@ -69,7 +69,7 @@ class AvroParquetSourceSpec val n: Int = 4 val file: String = genFinalFile.sample.get val documents: List[Document] = genDocuments(n).sample.get - val avroDocuments: List[Record] = documents.map(format.to(_)) + val avroDocuments: List[Record] = documents.map(format.to) Source(avroDocuments) .toMat(AvroParquetSink(parquetWriter(file, conf, schema)))(Keep.right) .run() diff --git a/aws-event-bridge/src/main/scala/org/apache/pekko/stream/connectors/aws/eventbridge/EventBridgePublishSettings.scala b/aws-event-bridge/src/main/scala/org/apache/pekko/stream/connectors/aws/eventbridge/EventBridgePublishSettings.scala index 2ad901f7c..642a1c0d6 100644 --- a/aws-event-bridge/src/main/scala/org/apache/pekko/stream/connectors/aws/eventbridge/EventBridgePublishSettings.scala +++ b/aws-event-bridge/src/main/scala/org/apache/pekko/stream/connectors/aws/eventbridge/EventBridgePublishSettings.scala @@ -27,7 +27,7 @@ final class EventBridgePublishSettings private (val concurrency: Int) { def withConcurrency(concurrency: Int): EventBridgePublishSettings = copy(concurrency = concurrency) - def copy(concurrency: Int) = new EventBridgePublishSettings(concurrency) + def copy(concurrency: Int): EventBridgePublishSettings = new EventBridgePublishSettings(concurrency) override def toString: String = "EventBridgePublishSettings(" + diff --git a/aws-event-bridge/src/test/scala/org/apache/pekko/stream/connectors/aws/eventbridge/EventBridgePublishMockSpec.scala b/aws-event-bridge/src/test/scala/org/apache/pekko/stream/connectors/aws/eventbridge/EventBridgePublishMockSpec.scala index 35cf55e9e..453073498 100644 --- 
a/aws-event-bridge/src/test/scala/org/apache/pekko/stream/connectors/aws/eventbridge/EventBridgePublishMockSpec.scala +++ b/aws-event-bridge/src/test/scala/org/apache/pekko/stream/connectors/aws/eventbridge/EventBridgePublishMockSpec.scala @@ -32,7 +32,7 @@ class EventBridgePublishMockSpec extends AnyFlatSpec with DefaultTestContext wit private def entryDetail(detail: String, eventBusName: Option[String] = None): PutEventsRequestEntry = { val entry = PutEventsRequestEntry.builder().detail(detail) - eventBusName.map(entry.eventBusName(_)) + eventBusName.map(entry.eventBusName) entry.build() } @@ -129,7 +129,7 @@ class EventBridgePublishMockSpec extends AnyFlatSpec with DefaultTestContext wit } it should "fail stage if upstream failure occurs" in { - case class MyCustomException(message: String) extends Exception(message) + final case class MyCustomException(message: String) extends Exception(message) val (probe, future) = TestSource.probe[Seq[PutEventsRequestEntry]].via(EventBridgePublisher.flowSeq()).toMat(Sink.seq)(Keep.both).run() diff --git a/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/javadsl/AwsLambdaFlow.scala b/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/javadsl/AwsLambdaFlow.scala index a09355531..6bc681c4b 100644 --- a/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/javadsl/AwsLambdaFlow.scala +++ b/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/javadsl/AwsLambdaFlow.scala @@ -22,7 +22,7 @@ import software.amazon.awssdk.services.lambda.LambdaAsyncClient object AwsLambdaFlow { /** - * Java API: creates a [[AwsLambdaFlowStage]] for a AWS Lambda function invocation using an [[LambdaAsyncClient]] + * Java API: creates a [[AwsLambdaFlow]] for a AWS Lambda function invocation using an [[LambdaAsyncClient]] */ def create(awsLambdaClient: LambdaAsyncClient, parallelism: Int): Flow[InvokeRequest, InvokeResponse, NotUsed] = pekko.stream.connectors.awslambda.scaladsl.AwsLambdaFlow.apply(parallelism)(awsLambdaClient).asJava diff --git a/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/scaladsl/AwsLambdaFlow.scala b/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/scaladsl/AwsLambdaFlow.scala index 4ba10be35..323dbf10d 100644 --- a/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/scaladsl/AwsLambdaFlow.scala +++ b/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/scaladsl/AwsLambdaFlow.scala @@ -23,7 +23,7 @@ import software.amazon.awssdk.services.lambda.LambdaAsyncClient object AwsLambdaFlow { /** - * Scala API: creates a [[AwsLambdaFlowStage]] for a AWS Lambda function invocation using [[LambdaAsyncClient]] + * Scala API: creates a [[AwsLambdaFlow]] for a AWS Lambda function invocation using [[LambdaAsyncClient]] */ def apply( parallelism: Int)(implicit awsLambdaClient: LambdaAsyncClient): Flow[InvokeRequest, InvokeResponse, NotUsed] = diff --git a/awslambda/src/test/java/docs/javadsl/AwsLambdaFlowTest.java b/awslambda/src/test/java/docs/javadsl/AwsLambdaFlowTest.java index 2c3acaf5f..2a6f395f5 100644 --- a/awslambda/src/test/java/docs/javadsl/AwsLambdaFlowTest.java +++ b/awslambda/src/test/java/docs/javadsl/AwsLambdaFlowTest.java @@ -66,9 +66,7 @@ public void lambdaFlow() throws Exception { InvokeResponse invokeResponse = InvokeResponse.builder().build(); when(awsLambdaClient.invoke(eq(invokeRequest))) .thenAnswer( - invocation -> { - return CompletableFuture.completedFuture(invokeResponse); - }); + invocation -> 
CompletableFuture.completedFuture(invokeResponse)); Flow flow = AwsLambdaFlow.create(awsLambdaClient, 1); Source source = Source.single(invokeRequest); final CompletionStage> stage = diff --git a/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala b/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala index 4cad3b954..3d0d62f1b 100644 --- a/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala +++ b/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala @@ -25,7 +25,6 @@ import pekko.testkit.TestKit import org.mockito.ArgumentMatchers.{ any => mockitoAny, eq => mockitoEq } import org.mockito.Mockito._ import org.mockito.invocation.InvocationOnMock -import org.mockito.stubbing.Answer import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers @@ -72,10 +71,8 @@ class AwsLambdaFlowSpec "call a single invoke request" in assertAllStagesStopped { when( - awsLambdaClient.invoke(mockitoEq(invokeRequest))).thenAnswer(new Answer[CompletableFuture[InvokeResponse]] { - override def answer(invocation: InvocationOnMock): CompletableFuture[InvokeResponse] = - CompletableFuture.completedFuture(invokeResponse) - }) + awsLambdaClient.invoke(mockitoEq(invokeRequest))).thenAnswer((invocation: InvocationOnMock) => + CompletableFuture.completedFuture(invokeResponse)) val (probe, future) = TestSource.probe[InvokeRequest].via(lambdaFlow).toMat(Sink.seq)(Keep.both).run() probe.sendNext(invokeRequest) @@ -89,13 +86,11 @@ class AwsLambdaFlowSpec "call with exception" in assertAllStagesStopped { when( - awsLambdaClient.invoke(mockitoAny[InvokeRequest]())).thenAnswer(new Answer[CompletableFuture[InvokeResponse]] { - override def answer(invocation: InvocationOnMock): CompletableFuture[InvokeResponse] = { - val exception = new RuntimeException("Error in lambda") - val future = new CompletableFuture[InvokeResponse]() - future.completeExceptionally(exception) - future - } + awsLambdaClient.invoke(mockitoAny[InvokeRequest]())).thenAnswer((invocation: InvocationOnMock) => { + val exception = new RuntimeException("Error in lambda") + val future = new CompletableFuture[InvokeResponse]() + future.completeExceptionally(exception) + future }) val (probe, future) = TestSource.probe[InvokeRequest].via(lambdaFlow).toMat(Sink.seq)(Keep.both).run() diff --git a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/impl/AzureQueueSourceStage.scala b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/impl/AzureQueueSourceStage.scala index ff882cc00..9b86e6063 100644 --- a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/impl/AzureQueueSourceStage.scala +++ b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/impl/AzureQueueSourceStage.scala @@ -54,9 +54,8 @@ import scala.collection.mutable.Queue if (res.isEmpty) { settings.retrieveRetryTimeout match { case Some(timeout) => - if (isAvailable(out)) { + if (isAvailable(out)) scheduleOnce(NotUsed, timeout) - } case None => complete(out) } } else { @@ -69,11 +68,10 @@ import scala.collection.mutable.Queue out, new OutHandler { override def onPull(): Unit = - if (!buffer.isEmpty) { + if (buffer.nonEmpty) push(out, buffer.dequeue()) - } else { + else retrieveMessages() - } }) } } diff --git a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/model.scala 
b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/model.scala index 6f75aaf2e..82ffbaedf 100644 --- a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/model.scala +++ b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/model.scala @@ -30,7 +30,7 @@ object DeleteOrUpdateMessage { } object UpdateVisibility { - def apply(timeout: Int) = + def apply(timeout: Int): UpdateVisibility = new UpdateVisibility(timeout) } diff --git a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/settings.scala b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/settings.scala index ffaa8bbac..e80321e83 100644 --- a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/settings.scala +++ b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/settings.scala @@ -23,7 +23,7 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } /** * Settings for AzureQueueSource * - * @param initalVisibilityTimeout Specifies how many seconds a message becomes invisible after it has been dequeued. + * @param initialVisibilityTimeout Specifies how many seconds a message becomes invisible after it has been dequeued. * See parameter of the same name in [[com.microsoft.azure.storage.queue.CloudQueue$.retrieveMessages]]. * @param batchSize Specifies how many message are fetched in one batch. * (This is the numberOfMessages parameter in [[com.microsoft.azure.storage.queue.CloudQueue$.retrieveMessages]].) @@ -45,7 +45,7 @@ final class AzureQueueSourceSettings private ( def withRetrieveRetryTimeout(retrieveRetryTimeout: FiniteDuration): AzureQueueSourceSettings = copy(retrieveRetryTimeout = Some(retrieveRetryTimeout)) - def withRetrieveRetryTimeout(retrieveRetryTimeout: JavaDuration) = + def withRetrieveRetryTimeout(retrieveRetryTimeout: JavaDuration): AzureQueueSourceSettings = copy(retrieveRetryTimeout = Some(Duration.fromNanos(retrieveRetryTimeout.toNanos))) /** diff --git a/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala b/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala index d662f764f..8d56b412b 100644 --- a/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala +++ b/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala @@ -46,8 +46,8 @@ class AzureQueueSpec extends TestKit(ActorSystem()) with AsyncFlatSpecLike with val queue = queueClient.getQueueReference(queueName) queue } - val queueFactory = () => queueOpt.get - def queue = queueFactory() + val queueFactory: () => CloudQueue = () => queueOpt.get + def queue: CloudQueue = queueFactory() override def withFixture(test: NoArgAsyncTest): FutureOutcome = { assume(queueOpt.isDefined, "Queue is not defined. 
Please set AZURE_CONNECTION_STRING") @@ -71,7 +71,7 @@ class AzureQueueSpec extends TestKit(ActorSystem()) with AsyncFlatSpecLike with message } - def assertCannotGetMessageFromQueue = + def assertCannotGetMessageFromQueue: Assertion = assert(queue.peekMessage() == null) "AzureQueueSource" should "be able to retrieve messages" in assertAllStagesStopped { diff --git a/cassandra/src/main/mima-filters/1.1.x.backwards.excludes/CassandraMetricsRegistry-more-specific-type.backwards.excludes b/cassandra/src/main/mima-filters/1.1.x.backwards.excludes/CassandraMetricsRegistry-more-specific-type.backwards.excludes new file mode 100644 index 000000000..44f063b52 --- /dev/null +++ b/cassandra/src/main/mima-filters/1.1.x.backwards.excludes/CassandraMetricsRegistry-more-specific-type.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.cassandra.CassandraMetricsRegistry.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.cassandra.CassandraMetricsRegistry.lookup") diff --git a/cassandra/src/main/mima-filters/1.1.x.backwards.excludes/CassandraSessionRegistry-more-specific-type.backwards.excludes b/cassandra/src/main/mima-filters/1.1.x.backwards.excludes/CassandraSessionRegistry-more-specific-type.backwards.excludes new file mode 100644 index 000000000..50b31716b --- /dev/null +++ b/cassandra/src/main/mima-filters/1.1.x.backwards.excludes/CassandraSessionRegistry-more-specific-type.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.cassandra.scaladsl.CassandraSessionRegistry.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.cassandra.scaladsl.CassandraSessionRegistry.lookup") diff --git a/cassandra/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes b/cassandra/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes new file mode 100644 index 000000000..fb5811c7a --- /dev/null +++ b/cassandra/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes @@ -0,0 +1 @@ +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.cassandra.scaladsl.CassandraSessionRegistry$SessionKey") diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraMetricsRegistry.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraMetricsRegistry.scala index a89ee69a2..666897e0b 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraMetricsRegistry.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraMetricsRegistry.scala @@ -44,7 +44,7 @@ class CassandraMetricsRegistry extends Extension { } object CassandraMetricsRegistry extends ExtensionId[CassandraMetricsRegistry] with ExtensionIdProvider { - override def lookup = CassandraMetricsRegistry + override def lookup: CassandraMetricsRegistry.type = CassandraMetricsRegistry override def createExtension(system: ExtendedActorSystem) = new CassandraMetricsRegistry diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraSessionSettings.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraSessionSettings.scala index 5a695df2c..0a1935891 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraSessionSettings.scala +++ 
b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraSessionSettings.scala @@ -63,7 +63,7 @@ class CassandraSessionSettings private (val configPath: String, object CassandraSessionSettings { - val ConfigPath = "pekko.connectors.cassandra" + val ConfigPath: String = "pekko.connectors.cassandra" def apply(): CassandraSessionSettings = apply(ConfigPath) diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CqlSessionProvider.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CqlSessionProvider.scala index 35ba8cfef..7520e7659 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CqlSessionProvider.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CqlSessionProvider.scala @@ -54,15 +54,14 @@ class DefaultSessionProvider(system: ActorSystem, config: Config) extends CqlSes */ private def usePekkoDiscovery(config: Config): Boolean = config.getString("service-discovery.name").nonEmpty - override def connect()(implicit ec: ExecutionContext): Future[CqlSession] = { - if (usePekkoDiscovery(config)) { + override def connect()(implicit ec: ExecutionContext): Future[CqlSession] = + if (usePekkoDiscovery(config)) PekkoDiscoverySessionProvider.connect(system, config) - } else { + else { val driverConfig = CqlSessionProvider.driverConfig(system, config) val driverConfigLoader = DriverConfigLoaderFromConfig.fromConfig(driverConfig) CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().asScala } - } } object CqlSessionProvider { @@ -75,7 +74,7 @@ object CqlSessionProvider { */ def apply(system: ExtendedActorSystem, config: Config): CqlSessionProvider = { val className = config.getString("session-provider") - val dynamicAccess = system.asInstanceOf[ExtendedActorSystem].dynamicAccess + val dynamicAccess = system.dynamicAccess val clazz = dynamicAccess.getClassFor[CqlSessionProvider](className).get def instantiate(args: immutable.Seq[(Class[_], AnyRef)]) = dynamicAccess.createInstanceFor[CqlSessionProvider](clazz, args) @@ -83,9 +82,9 @@ object CqlSessionProvider { val params = List((classOf[ActorSystem], system), (classOf[Config], config)) instantiate(params) .recoverWith { - case x: NoSuchMethodException => instantiate(params.take(1)) + case _: NoSuchMethodException => instantiate(params.take(1)) } - .recoverWith { case x: NoSuchMethodException => instantiate(Nil) } + .recoverWith { case _: NoSuchMethodException => instantiate(Nil) } .recoverWith { case ex: Exception => Failure( diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/DriverConfigLoaderFromConfig.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/DriverConfigLoaderFromConfig.scala index 657b4ed15..f9deda4b9 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/DriverConfigLoaderFromConfig.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/DriverConfigLoaderFromConfig.scala @@ -41,9 +41,7 @@ class DriverConfigLoaderFromConfig(config: Config) extends DriverConfigLoader { private val driverConfig: DriverConfig = new TypesafeDriverConfig(config) - override def getInitialConfig: DriverConfig = { - driverConfig - } + override def getInitialConfig: DriverConfig = driverConfig override def onDriverInit(context: DriverContext): Unit = () diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/PekkoDiscoverySessionProvider.scala 
b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/PekkoDiscoverySessionProvider.scala index 5feaefac4..4898370e7 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/PekkoDiscoverySessionProvider.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/PekkoDiscoverySessionProvider.scala @@ -30,7 +30,7 @@ import scala.concurrent.{ ExecutionContext, Future } * [[https://pekko.apache.org/docs/pekko/current/discovery/index.html Pekko Discovery]] * is enabled by setting the `service-discovery.name` in the given `CassandraSession` config. * - * Pekko Discovery overwrites the basic.contact-points` from the configuration with addresses + * Pekko Discovery overwrites the `basic.contact-points` from the configuration with addresses * provided by the configured Pekko Discovery mechanism. * * Example using config-based Pekko Discovery: @@ -66,7 +66,7 @@ import scala.concurrent.{ ExecutionContext, Future } */ private[cassandra] object PekkoDiscoverySessionProvider { - def connect(system: ActorSystem, config: Config)(implicit ec: ExecutionContext): Future[CqlSession] = { + def connect(system: ActorSystem, config: Config)(implicit ec: ExecutionContext): Future[CqlSession] = readNodes(config)(system, ec).flatMap { contactPoints => val driverConfigWithContactPoints = ConfigFactory.parseString(s""" basic.contact-points = [${contactPoints.mkString("\"", "\", \"", "\"")}] @@ -74,7 +74,6 @@ private[cassandra] object PekkoDiscoverySessionProvider { val driverConfigLoader = DriverConfigLoaderFromConfig.fromConfig(driverConfigWithContactPoints) CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().asScala } - } def connect(system: ClassicActorSystemProvider, config: Config)(implicit ec: ExecutionContext): Future[CqlSession] = connect(system.classicSystem, config) @@ -96,7 +95,7 @@ private[cassandra] object PekkoDiscoverySessionProvider { private def readNodes( serviceName: String, lookupTimeout: FiniteDuration)( - implicit system: ActorSystem, ec: ExecutionContext): Future[immutable.Seq[String]] = { + implicit system: ActorSystem, ec: ExecutionContext): Future[immutable.Seq[String]] = Discovery(system).discovery.lookup(serviceName, lookupTimeout).map { resolved => resolved.addresses.map { target => target.host + ":" + target.port.getOrElse { @@ -105,6 +104,5 @@ private[cassandra] object PekkoDiscoverySessionProvider { } } } - } } diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraFlow.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraFlow.scala index e9062138f..c43413861 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraFlow.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraFlow.scala @@ -60,12 +60,11 @@ object CassandraFlow { writeSettings: CassandraWriteSettings, cqlStatement: String, statementBinder: pekko.japi.Function2[T, PreparedStatement, BoundStatement]) - : FlowWithContext[T, Ctx, T, Ctx, NotUsed] = { + : FlowWithContext[T, Ctx, T, Ctx, NotUsed] = scaladsl.CassandraFlow .withContext(writeSettings, cqlStatement, (t, preparedStatement) => statementBinder.apply(t, preparedStatement))( session.delegate) .asJava - } /** * Creates a flow that uses [[com.datastax.oss.driver.api.core.cql.BatchStatement]] and groups the @@ -92,13 +91,12 @@ object CassandraFlow { writeSettings: CassandraWriteSettings, cqlStatement: String, 
statementBinder: (T, PreparedStatement) => BoundStatement, - groupingKey: pekko.japi.Function[T, K]): Flow[T, T, NotUsed] = { + groupingKey: pekko.japi.Function[T, K]): Flow[T, T, NotUsed] = scaladsl.CassandraFlow .createBatch(writeSettings, cqlStatement, (t, preparedStatement) => statementBinder.apply(t, preparedStatement), t => groupingKey.apply(t))(session.delegate) .asJava - } } diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraFlow.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraFlow.scala index 2281f614a..e239ec19a 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraFlow.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraFlow.scala @@ -42,7 +42,7 @@ object CassandraFlow { writeSettings: CassandraWriteSettings, cqlStatement: String, statementBinder: (T, PreparedStatement) => BoundStatement)( - implicit session: CassandraSession): Flow[T, T, NotUsed] = { + implicit session: CassandraSession): Flow[T, T, NotUsed] = Flow .lazyFutureFlow { () => val prepare = session.prepare(cqlStatement) @@ -55,7 +55,6 @@ object CassandraFlow { }(session.ec) } .mapMaterializedValue(_ => NotUsed) - } /** * A flow writing to Cassandra for every stream element, passing context along. @@ -72,7 +71,7 @@ object CassandraFlow { writeSettings: CassandraWriteSettings, cqlStatement: String, statementBinder: (T, PreparedStatement) => BoundStatement)( - implicit session: CassandraSession): FlowWithContext[T, Ctx, T, Ctx, NotUsed] = { + implicit session: CassandraSession): FlowWithContext[T, Ctx, T, Ctx, NotUsed] = FlowWithContext.fromTuples { Flow .lazyFutureFlow { () => @@ -88,7 +87,6 @@ object CassandraFlow { } .mapMaterializedValue(_ => NotUsed) } - } /** * Creates a flow that uses [[com.datastax.oss.driver.api.core.cql.BatchStatement]] and groups the @@ -114,7 +112,7 @@ object CassandraFlow { def createBatch[T, K](writeSettings: CassandraWriteSettings, cqlStatement: String, statementBinder: (T, PreparedStatement) => BoundStatement, - groupingKey: T => K)(implicit session: CassandraSession): Flow[T, T, NotUsed] = { + groupingKey: T => K)(implicit session: CassandraSession): Flow[T, T, NotUsed] = Flow .lazyFutureFlow { () => val prepareStatement: Future[PreparedStatement] = session.prepare(cqlStatement) @@ -132,5 +130,4 @@ object CassandraFlow { }(session.ec) } .mapMaterializedValue(_ => NotUsed) - } } diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSession.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSession.scala index 5d77b0113..774776b13 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSession.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSession.scala @@ -94,7 +94,7 @@ final class CassandraSession(system: pekko.actor.ActorSystem, /** * Meta data about the Cassandra server, such as its version. 
*/ - def serverMetaData: Future[CassandraServerMetaData] = { + def serverMetaData: Future[CassandraServerMetaData] = cachedServerMetaData match { case OptionVal.Some(cached) => cached @@ -122,7 +122,6 @@ final class CassandraSession(system: pekko.actor.ActorSystem, result case other => throw new MatchError(other) } - } /** * Execute CQL commands @@ -171,11 +170,10 @@ final class CassandraSession(system: pekko.actor.ActorSystem, * The returned `Future` is completed when the statement has been * successfully executed, or if it fails. */ - def executeWrite(stmt: Statement[_]): Future[Done] = { + def executeWrite(stmt: Statement[_]): Future[Done] = underlying().flatMap { cqlSession => cqlSession.executeAsync(stmt).asScala.map(_ => Done) } - } /** * Prepare, bind and execute one statement in one go. @@ -187,18 +185,16 @@ final class CassandraSession(system: pekko.actor.ActorSystem, * The returned `Future` is completed when the statement has been * successfully executed, or if it fails. */ - def executeWrite(stmt: String, bindValues: AnyRef*): Future[Done] = { + def executeWrite(stmt: String, bindValues: AnyRef*): Future[Done] = bind(stmt, bindValues).flatMap(b => executeWrite(b)) - } /** * INTERNAL API */ - @InternalApi private[pekko] def selectResultSet(stmt: Statement[_]): Future[AsyncResultSet] = { + @InternalApi private[pekko] def selectResultSet(stmt: Statement[_]): Future[AsyncResultSet] = underlying().flatMap { s => s.executeAsync(stmt).asScala } - } /** * Execute a select statement. First you must `prepare` the @@ -212,7 +208,7 @@ final class CassandraSession(system: pekko.actor.ActorSystem, * Note that you have to connect a `Sink` that consumes the messages from * this `Source` and then `run` the stream. */ - def select(stmt: Statement[_]): Source[Row, NotUsed] = { + def select(stmt: Statement[_]): Source[Row, NotUsed] = Source .futureSource { underlying().map { cqlSession => @@ -220,7 +216,6 @@ final class CassandraSession(system: pekko.actor.ActorSystem, } } .mapMaterializedValue(_ => NotUsed) - } /** * Execute a select statement created by `prepare`. @@ -233,7 +228,7 @@ final class CassandraSession(system: pekko.actor.ActorSystem, * Note that you have to connect a `Sink` that consumes the messages from * this `Source` and then `run` the stream. */ - def select(stmt: Future[Statement[_]]): Source[Row, NotUsed] = { + def select(stmt: Future[Statement[_]]): Source[Row, NotUsed] = Source .futureSource { underlying().flatMap(cqlSession => stmt.map(cqlSession -> _)).map { @@ -242,7 +237,6 @@ final class CassandraSession(system: pekko.actor.ActorSystem, } } .mapMaterializedValue(_ => NotUsed) - } /** * Prepare, bind and execute a select statement in one go. @@ -254,9 +248,8 @@ final class CassandraSession(system: pekko.actor.ActorSystem, * Note that you have to connect a `Sink` that consumes the messages from * this `Source` and then `run` the stream. */ - def select(stmt: String, bindValues: AnyRef*): Source[Row, NotUsed] = { + def select(stmt: String, bindValues: AnyRef*): Source[Row, NotUsed] = select(bind(stmt, bindValues)) - } /** * Execute a select statement. First you must `prepare` the statement and @@ -269,11 +262,10 @@ final class CassandraSession(system: pekko.actor.ActorSystem, * * The returned `Future` is completed with the found rows. 
*/ - def selectAll(stmt: Statement[_]): Future[immutable.Seq[Row]] = { + def selectAll(stmt: Statement[_]): Future[immutable.Seq[Row]] = select(stmt) .runWith(Sink.seq) .map(_.toVector) // Sink.seq returns Seq, not immutable.Seq (compilation issue in Eclipse) - } /** * Prepare, bind and execute a select statement in one go. Only use this method @@ -284,9 +276,8 @@ final class CassandraSession(system: pekko.actor.ActorSystem, * * The returned `Future` is completed with the found rows. */ - def selectAll(stmt: String, bindValues: AnyRef*): Future[immutable.Seq[Row]] = { + def selectAll(stmt: String, bindValues: AnyRef*): Future[immutable.Seq[Row]] = bind(stmt, bindValues).flatMap(bs => selectAll(bs)) - } /** * Execute a select statement that returns one row. First you must `prepare` the @@ -298,11 +289,10 @@ final class CassandraSession(system: pekko.actor.ActorSystem, * The returned `Future` is completed with the first row, * if any. */ - def selectOne(stmt: Statement[_]): Future[Option[Row]] = { + def selectOne(stmt: Statement[_]): Future[Option[Row]] = selectResultSet(stmt).map { rs => Option(rs.one()) // rs.one returns null if exhausted } - } /** * Prepare, bind and execute a select statement that returns one row. @@ -312,15 +302,13 @@ final class CassandraSession(system: pekko.actor.ActorSystem, * The returned `Future` is completed with the first row, * if any. */ - def selectOne(stmt: String, bindValues: AnyRef*): Future[Option[Row]] = { + def selectOne(stmt: String, bindValues: AnyRef*): Future[Option[Row]] = bind(stmt, bindValues).flatMap(bs => selectOne(bs)) - } - private def bind(stmt: String, bindValues: Seq[AnyRef]): Future[BoundStatement] = { + private def bind(stmt: String, bindValues: Seq[AnyRef]): Future[BoundStatement] = prepare(stmt).map { ps => if (bindValues.isEmpty) ps.bind() else ps.bind(bindValues: _*) } - } } diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionRegistry.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionRegistry.scala index 346390cee..63d209e73 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionRegistry.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionRegistry.scala @@ -40,10 +40,10 @@ object CassandraSessionRegistry extends ExtensionId[CassandraSessionRegistry] wi def createExtension(system: ClassicActorSystemProvider): CassandraSessionRegistry = createExtension(system.classicSystem.asInstanceOf[ExtendedActorSystem]) - override def lookup: ExtensionId[CassandraSessionRegistry] = this + override def lookup: CassandraSessionRegistry.type = CassandraSessionRegistry /** Hash key for `sessions`. */ - private case class SessionKey(configPath: String) + private final case class SessionKey(configPath: String) private def sessionKey(settings: CassandraSessionSettings) = SessionKey(settings.configPath) } @@ -83,9 +83,8 @@ final class CassandraSessionRegistry(system: ExtendedActorSystem) extends Extens * Note that the session must not be stopped manually, it is shut down when the actor system is shutdown, * if you need a more fine grained life cycle control, create the CassandraSession manually instead. 
*/ - def sessionFor(settings: CassandraSessionSettings): CassandraSession = { + def sessionFor(settings: CassandraSessionSettings): CassandraSession = sessionFor(settings, system.settings.config.getConfig(settings.configPath)) - } /** * INTERNAL API: Possibility to initialize the `SessionProvider` with a custom `Config` diff --git a/cassandra/src/test/java/docs/javadsl/CassandraFlowTest.java b/cassandra/src/test/java/docs/javadsl/CassandraFlowTest.java index 5a6c562ce..47a62e97c 100644 --- a/cassandra/src/test/java/docs/javadsl/CassandraFlowTest.java +++ b/cassandra/src/test/java/docs/javadsl/CassandraFlowTest.java @@ -188,7 +188,7 @@ public void withContextUsage() throws InterruptedException, ExecutionException, .map(row -> new Person(row.getInt("id"), row.getString("name"), row.getString("city"))) .runWith(Sink.seq(), system); List rows = await(select); - assertThat(new ArrayList<>(rows), hasItems(persons.stream().map(p -> p.first()).toArray())); + assertThat(new ArrayList<>(rows), hasItems(persons.stream().map(Pair::first).toArray())); } public static final class Person { diff --git a/cassandra/src/test/scala/docs/scaladsl/CassandraFlowSpec.scala b/cassandra/src/test/scala/docs/scaladsl/CassandraFlowSpec.scala index 3ba096514..0e19d691b 100644 --- a/cassandra/src/test/scala/docs/scaladsl/CassandraFlowSpec.scala +++ b/cassandra/src/test/scala/docs/scaladsl/CassandraFlowSpec.scala @@ -82,7 +82,7 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec import pekko.stream.connectors.cassandra.scaladsl.CassandraFlow import com.datastax.oss.driver.api.core.cql.{ BoundStatement, PreparedStatement } - case class Person(id: Int, name: String, city: String) + final case class Person(id: Int, name: String, city: String) val persons = immutable.Seq(Person(12, "John", "London"), Person(43, "Umberto", "Roma"), Person(56, "James", "Chicago")) @@ -98,7 +98,7 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec .runWith(Sink.seq) // #prepared - written.futureValue must have size (persons.size) + written.futureValue must have size persons.size val rows = CassandraSource(s"SELECT * FROM $table") .map { row => @@ -120,8 +120,8 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec |);""".stripMargin) }.futureValue mustBe Done - case class Person(id: Int, name: String, city: String) - case class AckHandle(id: Int) { + final case class Person(id: Int, name: String, city: String) + final case class AckHandle(id: Int) { def ack(): Future[Done] = Future.successful(Done) } val persons = @@ -170,7 +170,7 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec |);""".stripMargin) }.futureValue mustBe Done - case class Person(id: Int, name: String, city: String) + final case class Person(id: Int, name: String, city: String) val persons = immutable.Seq(Person(12, "John", "London"), Person(43, "Umberto", "Roma"), Person(56, "James", "Chicago")) diff --git a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala index 0aeec5fe1..89138511c 100644 --- a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala +++ b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala @@ -46,7 +46,7 @@ final class CassandraSessionSpec extends 
CassandraSpecBase(ActorSystem("Cassandr private val dataTableName = "testcounts" lazy val dataTable = s"$keyspaceName.$dataTableName" - def insertDataTable() = { + def insertDataTable() = withSchemaMetadataDisabled { for { _ <- lifecycleSession.executeDDL(s"""CREATE TABLE IF NOT EXISTS $dataTable ( @@ -66,7 +66,6 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr s"INSERT INTO $dataTable (partition, key, count) VALUES ('B', 'f', 6);")) } yield Done }.futureValue mustBe Done - } override def beforeAll(): Unit = { super.beforeAll() diff --git a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala index 01d67fd96..d50035189 100644 --- a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala +++ b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala @@ -40,19 +40,17 @@ trait CassandraLifecycleBase { session.executeWriteBatch(batch.build()) } - def executeCql(session: CassandraSession, statements: immutable.Seq[String]): Future[Done] = { + def executeCql(session: CassandraSession, statements: immutable.Seq[String]): Future[Done] = execute(session, statements.map(stmt => SimpleStatement.newInstance(stmt))) - } private val keyspaceTimeout = java.time.Duration.ofSeconds(15) - def createKeyspace(session: CassandraSession, name: String): Future[Done] = { + def createKeyspace(session: CassandraSession, name: String): Future[Done] = session.executeWrite( new SimpleStatementBuilder( s"""CREATE KEYSPACE $name WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '1'};""").setTimeout( keyspaceTimeout) .build()) - } def dropKeyspace(session: CassandraSession, name: String): Future[Done] = session.executeWrite( diff --git a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala index 00fb120c6..87bb6756f 100644 --- a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala +++ b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala @@ -45,7 +45,7 @@ final class CassandraSessionPerformanceSpec extends CassandraSpecBase(ActorSyste // only using one primary key in this test private val id = "1" - def insertDataTable() = { + def insertDataTable() = lifecycleSession .executeDDL(s"""CREATE TABLE IF NOT EXISTS $dataTable ( | partition_id bigint, @@ -66,7 +66,6 @@ final class CassandraSessionPerformanceSpec extends CassandraSpecBase(ActorSyste .runWith(Sink.ignore) } .futureValue - } override def beforeAll(): Unit = { super.beforeAll() diff --git a/couchbase/src/main/mima-filters/1.1.x.backwards.excludes/CouchbaseSessionRegistry-more-specific-type.backwards.excludes b/couchbase/src/main/mima-filters/1.1.x.backwards.excludes/CouchbaseSessionRegistry-more-specific-type.backwards.excludes new file mode 100644 index 000000000..a6cc9c8e4 --- /dev/null +++ b/couchbase/src/main/mima-filters/1.1.x.backwards.excludes/CouchbaseSessionRegistry-more-specific-type.backwards.excludes @@ -0,0 +1,2 @@ 
+ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.couchbase.CouchbaseSessionRegistry.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.couchbase.CouchbaseSessionRegistry.lookup") diff --git a/couchbase/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes b/couchbase/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes new file mode 100644 index 000000000..a9c55e67b --- /dev/null +++ b/couchbase/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes @@ -0,0 +1 @@ +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.couchbase.CouchbaseSessionRegistry$SessionKey") diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/CouchbaseSessionRegistry.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/CouchbaseSessionRegistry.scala index 646c19b27..e51176b36 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/CouchbaseSessionRegistry.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/CouchbaseSessionRegistry.scala @@ -47,9 +47,9 @@ object CouchbaseSessionRegistry extends ExtensionId[CouchbaseSessionRegistry] wi override def get(system: pekko.actor.ActorSystem): CouchbaseSessionRegistry = super.apply(system) - override def lookup: ExtensionId[CouchbaseSessionRegistry] = this + override def lookup: CouchbaseSessionRegistry.type = CouchbaseSessionRegistry - private case class SessionKey(settings: CouchbaseSessionSettings, bucketName: String) + private final case class SessionKey(settings: CouchbaseSessionSettings, bucketName: String) } final class CouchbaseSessionRegistry(system: ExtendedActorSystem) extends Extension { @@ -103,10 +103,9 @@ final class CouchbaseSessionRegistry(system: ExtendedActorSystem) extends Extens ExecutionContexts.parasitic) promise.completeWith(session) promise.future - } else { + } else // we lost cas (could be concurrent call for some other key though), retry startSession(key) - } } } diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseClusterRegistry.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseClusterRegistry.scala index 919ff5459..29aa60daf 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseClusterRegistry.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseClusterRegistry.scala @@ -64,10 +64,9 @@ final private[couchbase] class CouchbaseClusterRegistry(system: ActorSystem) { }(system.dispatcher) } future - } else { + } else // we lost cas (could be concurrent call for some other settings though), retry createClusterClient(settings) - } } } diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseSessionImpl.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseSessionImpl.scala index a4584b1f0..294e07258 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseSessionImpl.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseSessionImpl.scala @@ -159,9 +159,8 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl case None => Future.successful(Done) } }(ExecutionContexts.global()) - } else { + } else 
Future.successful(Done) - } override def toString: String = s"CouchbaseSession(${asyncBucket.name()})" diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/RxUtilities.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/RxUtilities.scala index 336f8d140..d9745f3bc 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/RxUtilities.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/RxUtilities.scala @@ -29,22 +29,15 @@ import scala.concurrent.{ Future, Promise } @InternalApi private[couchbase] object RxUtilities { - val unfoldDocument = new Func1[AsyncN1qlQueryRow, JsonObject] { - def call(row: AsyncN1qlQueryRow): JsonObject = - row.value() - } + val unfoldDocument: Func1[AsyncN1qlQueryRow, JsonObject] = (row: AsyncN1qlQueryRow) => row.value() - val failStreamOnError = new Func1[JsonObject, Observable[JsonObject]] { - override def call(err: JsonObject): Observable[JsonObject] = - Observable.error(CouchbaseResponseException(err)) - } + val failStreamOnError: Func1[JsonObject, Observable[JsonObject]] = + (err: JsonObject) => Observable.error(CouchbaseResponseException(err)) - val unfoldJsonObjects = new Func1[AsyncN1qlQueryResult, Observable[JsonObject]] { - def call(t: AsyncN1qlQueryResult): Observable[JsonObject] = { - val data: Observable[JsonObject] = t.rows().map(unfoldDocument) - val errors = t.errors().flatMap(failStreamOnError) - data.mergeWith(errors) - } + val unfoldJsonObjects: Func1[AsyncN1qlQueryResult, Observable[JsonObject]] = (t: AsyncN1qlQueryResult) => { + val data: Observable[JsonObject] = t.rows().map(unfoldDocument) + val errors = t.errors().flatMap(failStreamOnError) + data.mergeWith(errors) } def singleObservableToFuture[T](o: Observable[T], id: Any): Future[T] = { @@ -68,12 +61,12 @@ private[couchbase] object RxUtilities { p.future } - def func1Observable[T, R](fun: T => Observable[R]) = + def func1Observable[T, R](fun: T => Observable[R]): Func1[T, Observable[R]] = new Func1[T, Observable[R]]() { override def call(b: T): Observable[R] = fun(b) } - def func1[T, R](fun: T => R) = + def func1[T, R](fun: T => R): Func1[T, R] = new Func1[T, R]() { override def call(b: T): R = fun(b) } diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/javadsl/CouchbaseSession.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/javadsl/CouchbaseSession.scala index c1d26cf03..0027075da 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/javadsl/CouchbaseSession.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/javadsl/CouchbaseSession.scala @@ -274,7 +274,7 @@ abstract class CouchbaseSession { * is set to true. * @param fields the JSON fields to index - each can be either `String` or [[com.couchbase.client.java.query.dsl.Expression]] * @return a [[java.util.concurrent.CompletionStage]] of `true` if the index was/will be effectively created, `false` - * if the index existed and ignoreIfExist` is true. Completion of the `CompletionStage` does not guarantee the index + * if the index existed and `ignoreIfExist` is true. Completion of the `CompletionStage` does not guarantee the index * is online and ready to be used. 
*/ def createIndex(indexName: String, ignoreIfExist: Boolean, fields: AnyRef*): CompletionStage[Boolean] diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala index b40e87582..0bf79b1a1 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala @@ -37,7 +37,7 @@ object CouchbaseWriteSettings { /** * Simple settings not requiring replication nor persistence. */ - val inMemory = CouchbaseWriteSettings(1, ReplicateTo.NONE, PersistTo.NONE, 2.seconds) + val inMemory: CouchbaseWriteSettings = CouchbaseWriteSettings(1, ReplicateTo.NONE, PersistTo.NONE, 2.seconds) def apply(): CouchbaseWriteSettings = inMemory diff --git a/couchbase/src/test/scala/org/apache/pekko/stream/connectors/couchbase/testing/CouchbaseSupport.scala b/couchbase/src/test/scala/org/apache/pekko/stream/connectors/couchbase/testing/CouchbaseSupport.scala index d0eb150ba..0fdb15629 100644 --- a/couchbase/src/test/scala/org/apache/pekko/stream/connectors/couchbase/testing/CouchbaseSupport.scala +++ b/couchbase/src/test/scala/org/apache/pekko/stream/connectors/couchbase/testing/CouchbaseSupport.scala @@ -34,7 +34,7 @@ import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration._ import scala.concurrent.{ Await, Future } -case class TestObject(id: String, value: String) +final case class TestObject(id: String, value: String) private[couchbase] object CouchbaseSupport { val jacksonMapper = JsonMapper.builder() diff --git a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala index d81dc4a9e..e70a323f8 100644 --- a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala +++ b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala @@ -91,9 +91,7 @@ class CsvBench { } @TearDown - def tearDown(): Unit = { - system.terminate() - } + def tearDown(): Unit = system.terminate() @Setup def setup(): Unit = { diff --git a/csv/src/main/java/org/apache/pekko/stream/connectors/csv/javadsl/CsvQuotingStyle.java b/csv/src/main/java/org/apache/pekko/stream/connectors/csv/javadsl/CsvQuotingStyle.java index 2804e7ff5..5f697faff 100644 --- a/csv/src/main/java/org/apache/pekko/stream/connectors/csv/javadsl/CsvQuotingStyle.java +++ b/csv/src/main/java/org/apache/pekko/stream/connectors/csv/javadsl/CsvQuotingStyle.java @@ -19,5 +19,5 @@ public enum CsvQuotingStyle { ALWAYS, /** Quote only fields requiring quotes */ - REQUIRED; + REQUIRED } diff --git a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvFormatter.scala b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvFormatter.scala index 36f248a74..222ce37c4 100644 --- a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvFormatter.scala +++ b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvFormatter.scala @@ -65,45 +65,39 @@ import scala.collection.immutable while (index > -1) { builder ++= ByteString.apply(field.substring(lastIndex, index), charsetName) val char = field.charAt(index) - if (char == quoteChar) { + if (char == quoteChar) builder ++= duplicatedQuote - } else { + else builder ++= duplicatedEscape - } lastIndex = index + 1 index = indexOfQuoteOrEscape(lastIndex) } - if 
(lastIndex < field.length) { + if (lastIndex < field.length) builder ++= ByteString(field.substring(lastIndex), charsetName) - } } def append(field: String) = { val (quoteIt, splitAt) = requiresQuotesOrSplit(field) if (quoteIt || quotingStyle == CsvQuotingStyle.Always) { builder ++= quoteBs - if (splitAt != -1) { + if (splitAt != -1) splitAndDuplicateQuotesAndEscapes(field, splitAt) - } else { + else builder ++= ByteString(field, charsetName) - } builder ++= quoteBs - } else { + } else builder ++= ByteString(field, charsetName) - } } val iterator = fields.iterator var hasNext = iterator.hasNext while (hasNext) { val next = iterator.next() - if (next != null) { + if (next != null) append(next.toString) - } hasNext = iterator.hasNext - if (hasNext) { + if (hasNext) builder ++= delimiterBs - } } builder ++= endOfLineBs builder.result() diff --git a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvParser.scala b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvParser.scala index 2c229af33..1a837e11d 100644 --- a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvParser.scala +++ b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvParser.scala @@ -128,9 +128,8 @@ import scala.collection.mutable val line = maybeExtractLine(requireLineEnd) if (line.nonEmpty) { currentLineNo += 1 - if (state == LineEnd || !requireLineEnd) { + if (state == LineEnd || !requireLineEnd) state = LineStart - } resetLine() columns.clear() } @@ -147,7 +146,7 @@ import scala.collection.mutable lineBytesDropped = 0 } - private[this] def dropReadBuffer() = { + private[this] def dropReadBuffer(): Unit = { buffer = buffer.drop(pos) lineBytesDropped += pos pos = 0 @@ -186,13 +185,13 @@ import scala.collection.mutable } - private[this] def noCharEscaped() = + private[this] def noCharEscaped(): Nothing = throw new MalformedCsvException(currentLineNo, lineLength, s"wrong escaping at $currentLineNo:$lineLength, no character after escape") private[this] def checkForByteOrderMark(): Unit = - if (buffer.length >= 2) { + if (buffer.length >= 2) if (buffer.startsWith(ByteOrderMark.UTF_8)) { advance(3) fieldStart = 3 @@ -207,7 +206,6 @@ import scala.collection.mutable throw new UnsupportedCharsetException("UTF-32 BE") } } - } private[this] def parseLine(): Unit = { if (firstData) { @@ -330,7 +328,7 @@ import scala.collection.mutable lineLength, s"wrong escaping at $currentLineNo:$lineLength, quote is escaped as ${quoteChar.toChar}${quoteChar.toChar}") - case b => + case _ => fieldBuilder.add(escapeChar) state = WithinField @@ -375,7 +373,7 @@ import scala.collection.mutable state = WithinQuotedField advance() - case b => + case _ => fieldBuilder.add(escapeChar) state = WithinQuotedField } @@ -387,7 +385,7 @@ import scala.collection.mutable state = WithinQuotedField advance() - case b => + case _ => state = WithinField } diff --git a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvParsingStage.scala b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvParsingStage.scala index 8b2f89e83..7da0d706f 100644 --- a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvParsingStage.scala +++ b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvParsingStage.scala @@ -34,11 +34,11 @@ import scala.util.control.NonFatal private val in = Inlet[ByteString](Logging.simpleName(this) + ".in") private val out = Outlet[List[ByteString]](Logging.simpleName(this) + ".out") - override val shape = FlowShape(in, out) + override val shape: 
FlowShape[ByteString, List[ByteString]] = FlowShape(in, out) override protected def initialAttributes: Attributes = Attributes.name("CsvParsing") - override def createLogic(inheritedAttributes: Attributes) = + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic with InHandler with OutHandler = new GraphStageLogic(shape) with InHandler with OutHandler { private[this] val buffer = new CsvParser(delimiter, quoteChar, escapeChar, maximumLineLength) @@ -57,7 +57,7 @@ import scala.util.control.NonFatal completeStage() } - private def tryPollBuffer() = + private def tryPollBuffer(): Unit = try buffer.poll(requireLineEnd = true) match { case Some(csvLine) => push(out, csvLine) case _ => diff --git a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvToMapJavaStage.scala b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvToMapJavaStage.scala index 3bdd4ec9f..bba354e9a 100644 --- a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvToMapJavaStage.scala +++ b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvToMapJavaStage.scala @@ -23,7 +23,8 @@ import pekko.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } import pekko.util.ByteString /** - * Internal Java API: Converts incoming {@link Collection}<{@link ByteString}> to {@link java.util.Map}. + * Internal Java API: Converts incoming [[ju.Collection]] containing a [[ByteString]] to [[ju.Map]] with [[String]] as + * key. * * @param columnNames If given, these names are used as map keys; if not first stream element is used * @param charset Character set used to convert header line ByteString to String @@ -42,7 +43,7 @@ import pekko.util.ByteString private val in = Inlet[ju.Collection[ByteString]]("CsvToMap.in") private val out = Outlet[ju.Map[String, V]]("CsvToMap.out") - override val shape = FlowShape.of(in, out) + override val shape: FlowShape[ju.Collection[ByteString], ju.Map[String, V]] = FlowShape.of(in, out) val fieldValuePlaceholder: V @@ -64,15 +65,14 @@ import pekko.util.ByteString new InHandler { override def onPush(): Unit = { val elem = grab(in) - if (combineAll) { + if (combineAll) process(elem, zipAllWithHeaders) - } else { + else process(elem, zipWithHeaders) - } } }) - private def process(elem: ju.Collection[ByteString], combine: ju.Collection[V] => ju.Map[String, V]) = { + private def process(elem: ju.Collection[ByteString], combine: ju.Collection[V] => ju.Map[String, V]): Unit = { if (headers.isPresent) { val map = combine(transformElements(elem)) push(out, map) @@ -101,20 +101,19 @@ import pekko.util.ByteString val map = new ju.HashMap[String, V]() val hIter = headers.get.iterator() val colIter = elem.iterator() - if (headers.get.size() > elem.size()) { + if (headers.get.size() > elem.size()) while (hIter.hasNext) { - if (colIter.hasNext) { + if (colIter.hasNext) map.put(hIter.next(), colIter.next()) - } else { + else map.put(hIter.next(), customFieldValuePlaceholder.orElse(fieldValuePlaceholder)) - } } - } else if (elem.size() > headers.get.size()) { + else if (elem.size() > headers.get.size()) { var index = 0 while (colIter.hasNext) { - if (hIter.hasNext) { + if (hIter.hasNext) map.put(hIter.next(), colIter.next()) - } else { + else { map.put(headerPlaceholder.orElse("MissingHeader") + index, colIter.next()) index = index + 1 } diff --git a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvToMapStage.scala b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvToMapStage.scala index
5a12e4da2..53f6dee24 100644 --- a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvToMapStage.scala +++ b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/impl/CsvToMapStage.scala @@ -45,7 +45,7 @@ import scala.collection.immutable private val in = Inlet[immutable.Seq[ByteString]]("CsvToMap.in") private val out = Outlet[Map[String, V]]("CsvToMap.out") - override val shape = FlowShape.of(in, out) + override val shape: FlowShape[immutable.Seq[ByteString], Map[String, V]] = FlowShape.of(in, out) val fieldValuePlaceholder: V @@ -59,17 +59,16 @@ import scala.collection.immutable override def onPush(): Unit = { val elem = grab(in) - if (combineAll) { + if (combineAll) process(elem, combineUsingPlaceholder(elem)) - } else { + else process(elem, headers => headers.get.zip(transformElements(elem)).toMap) - } } private def process(elem: immutable.Seq[ByteString], combine: => Headers => Map[String, V]): Unit = { - if (headers.isDefined) { + if (headers.isDefined) push(out, combine(headers)) - } else { + else { headers = Some(elem.map(_.decodeString(charset))) pull(in) } diff --git a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/model.scala b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/model.scala index fbcaac272..c152ea268 100644 --- a/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/model.scala +++ b/csv/src/main/scala/org/apache/pekko/stream/connectors/csv/model.scala @@ -19,11 +19,11 @@ class MalformedCsvException private[csv] (val lineNo: Long, val bytePos: Int, ms * Java API: * Returns the line number where the parser failed. */ - def getLineNo = lineNo + def getLineNo: Long = lineNo /** * Java API: * Returns the byte within the parsed line where the parser failed. */ - def getBytePos = bytePos + def getBytePos: Int = bytePos } diff --git a/csv/src/test/scala/docs/scaladsl/CsvParsingSpec.scala b/csv/src/test/scala/docs/scaladsl/CsvParsingSpec.scala index 71a7d797f..4117e175e 100644 --- a/csv/src/test/scala/docs/scaladsl/CsvParsingSpec.scala +++ b/csv/src/test/scala/docs/scaladsl/CsvParsingSpec.scala @@ -193,56 +193,49 @@ class CsvParsingSpec extends CsvSpec { .map(_.view.mapValues(_.utf8String).toIndexedSeq) .runWith(Sink.seq) val res = fut.futureValue - res(0) should contain allElementsOf ( - Map( - "Year" -> "1997", - "Make" -> "Ford", - "Model" -> "E350", - "Description" -> "ac, abs, moon", - "Price" -> "3000.00")) - res(1) should contain allElementsOf ( - Map( - "Year" -> "1999", - "Make" -> "Chevy", - "Model" -> "Venture \"Extended Edition\"", - "Description" -> "", - "Price" -> "4900.00")) - res(2) should contain allElementsOf ( - Map( - "Year" -> "1996", - "Make" -> "Jeep", - "Model" -> "Grand Cherokee", - "Description" -> """MUST SELL! + res(0) should contain allElementsOf Map( + "Year" -> "1997", + "Make" -> "Ford", + "Model" -> "E350", + "Description" -> "ac, abs, moon", + "Price" -> "3000.00") + res(1) should contain allElementsOf Map( + "Year" -> "1999", + "Make" -> "Chevy", + "Model" -> "Venture \"Extended Edition\"", + "Description" -> "", + "Price" -> "4900.00") + res(2) should contain allElementsOf Map( + "Year" -> "1996", + "Make" -> "Jeep", + "Model" -> "Grand Cherokee", + "Description" -> """MUST SELL! 
|air, moon roof, loaded""".stripMargin, - "Price" -> "4799.00")) - res(3) should contain allElementsOf ( - Map( - "Year" -> "1999", - "Make" -> "Chevy", - "Model" -> "Venture \"Extended Edition, Very Large\"", - "Description" -> "", - "Price" -> "5000.00")) - res(4) should contain allElementsOf ( - Map( - "Year" -> "", - "Make" -> "", - "Model" -> "Venture \"Extended Edition\"", - "Description" -> "", - "Price" -> "4900.00")) - res(5) should contain allElementsOf ( - Map( - "Year" -> "1995", - "Make" -> "VW", - "Model" -> "Golf \"GTE\"", - "Description" -> "", - "Price" -> "5000.00")) - res(6) should contain allElementsOf ( - Map( - "Year" -> "1996", - "Make" -> "VW", - "Model" -> "Golf GTE", - "Description" -> "", - "Price" -> "5000.00")) + "Price" -> "4799.00") + res(3) should contain allElementsOf Map( + "Year" -> "1999", + "Make" -> "Chevy", + "Model" -> "Venture \"Extended Edition, Very Large\"", + "Description" -> "", + "Price" -> "5000.00") + res(4) should contain allElementsOf Map( + "Year" -> "", + "Make" -> "", + "Model" -> "Venture \"Extended Edition\"", + "Description" -> "", + "Price" -> "4900.00") + res(5) should contain allElementsOf Map( + "Year" -> "1995", + "Make" -> "VW", + "Model" -> "Golf \"GTE\"", + "Description" -> "", + "Price" -> "5000.00") + res(6) should contain allElementsOf Map( + "Year" -> "1996", + "Make" -> "VW", + "Model" -> "Golf GTE", + "Description" -> "", + "Price" -> "5000.00") } } } diff --git a/doc-examples/src/test/java/org/apache/pekko/stream/connectors/eip/javadsl/PassThroughExamples.java b/doc-examples/src/test/java/org/apache/pekko/stream/connectors/eip/javadsl/PassThroughExamples.java index 4080a67e2..36cdc686f 100644 --- a/doc-examples/src/test/java/org/apache/pekko/stream/connectors/eip/javadsl/PassThroughExamples.java +++ b/doc-examples/src/test/java/org/apache/pekko/stream/connectors/eip/javadsl/PassThroughExamples.java @@ -128,7 +128,7 @@ public void dummy() { Consumer.DrainingControl control = Consumer.committableSource(consumerSettings, Subscriptions.topics("topic1")) .via(PassThroughFlow.create(writeFlow, Keep.right())) - .map(i -> i.committableOffset()) + .map(ConsumerMessage.CommittableMessage::committableOffset) .toMat(Committer.sink(comitterSettings), Keep.both()) .mapMaterializedValue(Consumer::createDrainingControl) .run(system); diff --git a/dynamodb/src/test/java/docs/javadsl/ExampleTest.java b/dynamodb/src/test/java/docs/javadsl/ExampleTest.java index 261e31d73..919096891 100644 --- a/dynamodb/src/test/java/docs/javadsl/ExampleTest.java +++ b/dynamodb/src/test/java/docs/javadsl/ExampleTest.java @@ -76,7 +76,7 @@ public static void setup() throws Exception { // #init-client .build(); - system.registerOnTermination(() -> client.close()); + system.registerOnTermination(client::close); // #init-client diff --git a/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala b/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala index 475fc019c..199884466 100644 --- a/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala +++ b/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala @@ -109,7 +109,7 @@ class ExampleSpec } "flow with context" in { - case class SomeContext() + final case class SomeContext() // #withContext val source: SourceWithContext[PutItemRequest, SomeContext, NotUsed] = // ??? 
diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ElasticsearchSourceSettings.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ElasticsearchSourceSettings.scala index 3f56cd597..8bcba3b1d 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ElasticsearchSourceSettings.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ElasticsearchSourceSettings.scala @@ -18,7 +18,7 @@ import java.util.concurrent.TimeUnit import scala.concurrent.duration.FiniteDuration /** - * Configure Elastiscsearch sources. + * Configure Elasticsearch sources. */ final class ElasticsearchSourceSettings private (connection: ElasticsearchConnectionSettings, bufferSize: Int, diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/SourceSettingsBase.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/SourceSettingsBase.scala index d74ccb06a..2714eef32 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/SourceSettingsBase.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/SourceSettingsBase.scala @@ -65,6 +65,6 @@ abstract class SourceSettingsBase[Version <: ApiVersionBase, S <: SourceSettings bufferSize: Int = bufferSize, includeDocumentVersion: Boolean = includeDocumentVersion, scrollDuration: FiniteDuration = scrollDuration, - apiVersion: Version = apiVersion): S; + apiVersion: Version = apiVersion): S } diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteMessage.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteMessage.scala index 627d99809..d4634a69f 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteMessage.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteMessage.scala @@ -181,7 +181,7 @@ trait MessageWriter[T] { def convert(message: T): String } -sealed class StringMessageWriter private () extends MessageWriter[String] { +sealed class StringMessageWriter extends MessageWriter[String] { override def convert(message: String): String = message } diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteSettingsBase.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteSettingsBase.scala index 7acf6fb17..a82c423eb 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteSettingsBase.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteSettingsBase.scala @@ -43,5 +43,5 @@ abstract class WriteSettingsBase[Version <: ApiVersionBase, W <: WriteSettingsBa retryLogic: RetryLogic = retryLogic, versionType: Option[String] = versionType, apiVersion: Version = apiVersion, - allowExplicitIndex: Boolean = allowExplicitIndex): W; + allowExplicitIndex: Boolean = allowExplicitIndex): W } diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchApi.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchApi.scala index 5f49006d5..358138bce 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchApi.scala +++ 
b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchApi.scala @@ -25,14 +25,12 @@ import scala.concurrent.Future @InternalApi private[impl] object ElasticsearchApi { def executeRequest( request: HttpRequest, - connectionSettings: ElasticsearchConnectionSettings)(implicit http: HttpExt): Future[HttpResponse] = { - if (connectionSettings.hasCredentialsDefined) { + connectionSettings: ElasticsearchConnectionSettings)(implicit http: HttpExt): Future[HttpResponse] = + if (connectionSettings.hasCredentialsDefined) http.singleRequest( request.addCredentials(BasicHttpCredentials(connectionSettings.username.get, connectionSettings.password.get))) - } else { + else http.singleRequest(request, connectionContext = connectionSettings.connectionContext.getOrElse(http.defaultClientHttpsContext)) - } - } } diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchSimpleFlowStage.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchSimpleFlowStage.scala index 67513ecd1..b7ddb7426 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchSimpleFlowStage.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchSimpleFlowStage.scala @@ -44,7 +44,9 @@ private[elasticsearch] final class ElasticsearchSimpleFlowStage[T, C]( private val in = Inlet[(immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]])]("messagesAndResultPassthrough") private val out = Outlet[immutable.Seq[WriteResult[T, C]]]("result") - override val shape = FlowShape(in, out) + override val shape: FlowShape[(immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]]), immutable.Seq[ + WriteResult[T, C]]] = + FlowShape(in, out) private val restApi: RestBulkApi[T, C] = settings.apiVersion match { case ApiVersion.V5 => @@ -143,14 +145,13 @@ private[elasticsearch] final class ElasticsearchSimpleFlowStage[T, C]( } val messageResults = restApi.toWriteResults(messages, response) - if (log.isErrorEnabled) { + if (log.isErrorEnabled) messageResults.filterNot(_.success).foreach { failure => if (failure.getError.isPresent) { log.error(s"Received error from elastic when attempting to index documents. 
Error: {}", failure.getError.get) } } - } emit(out, messageResults ++ resultsPassthrough) if (isClosed(in)) completeStage() @@ -158,9 +159,8 @@ private[elasticsearch] final class ElasticsearchSimpleFlowStage[T, C]( } private def tryPull(): Unit = - if (!isClosed(in) && !hasBeenPulled(in)) { + if (!isClosed(in) && !hasBeenPulled(in)) pull(in) - } override def onUpstreamFinish(): Unit = if (!inflight) completeStage() diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchSourceStage.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchSourceStage.scala index c5b2207ce..ad8852f50 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchSourceStage.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/ElasticsearchSourceStage.scala @@ -38,13 +38,13 @@ import scala.util.{ Failure, Success, Try } * INTERNAL API */ @InternalApi -private[elasticsearch] case class ScrollResponse[T](error: Option[String], result: Option[ScrollResult[T]]) +private[elasticsearch] final case class ScrollResponse[T](error: Option[String], result: Option[ScrollResult[T]]) /** * INTERNAL API */ @InternalApi -private[elasticsearch] case class ScrollResult[T](scrollId: Option[String], messages: Seq[ReadResult[T]]) +private[elasticsearch] final case class ScrollResult[T](scrollId: Option[String], messages: Seq[ReadResult[T]]) /** * INTERNAL API @@ -102,43 +102,39 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T]( private var pullIsWaitingForData = false private var dataReady: Option[ScrollResponse[T]] = None - def prepareUri(path: Path): Uri = { + def prepareUri(path: Path): Uri = Uri(settings.connection.baseUrl) .withPath(path) - } def sendScrollScanRequest(): Unit = try { waitingForElasticData = true scrollId match { - case None => { + case None => log.debug("Doing initial search") // Add extra params to search val extraParams = Seq( - if (!searchParams.contains("size")) { + if (!searchParams.contains("size")) Some("size" -> settings.bufferSize.toString) - } else { - None - }, + else + None, // Tell elastic to return the documents '_version'-property with the search-results // http://nocf-www.elastic.co/guide/en/elasticsearch/reference/current/search-request-version.html // https://www.elastic.co/guide/en/elasticsearch/guide/current/optimistic-concurrency-control.html - if (!searchParams.contains("version") && settings.includeDocumentVersion) { + if (!searchParams.contains("version") && settings.includeDocumentVersion) Some("version" -> "true") - } else { - None - }) + else + None) val baseMap = Map("scroll" -> settings.scroll) // only force sorting by _doc (meaning order is not known) if not specified in search params - val sortQueryParam = if (searchParams.contains("sort")) { + val sortQueryParam = if (searchParams.contains("sort")) None - } else { + else Some(("sort", "_doc")) - } val routingQueryParam = searchParams.get("routing").map(r => ("routing", r)) @@ -187,9 +183,8 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T]( .recover { case cause: Throwable => failureHandler.invoke(cause) } - } - case Some(actualScrollId) => { + case Some(actualScrollId) => log.debug("Fetching next scroll") val uri = prepareUri(Path("/_search/scroll")) @@ -221,7 +216,6 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T]( .recover { case cause: Throwable => failureHandler.invoke(cause) } - } } } 
catch { case ex: Exception => failureHandler.invoke(ex) @@ -240,10 +234,9 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T]( if (pullIsWaitingForData) { log.debug("Received data from elastic. Downstream has already called pull and is waiting for data") pullIsWaitingForData = false - if (handleScrollResponse(scrollResponse)) { + if (handleScrollResponse(scrollResponse)) // we should go and get more data sendScrollScanRequest() - } } else { log.debug("Received data from elastic. Downstream have not yet asked for it") // This is a prefetch of data which we received before downstream has asked for it @@ -284,10 +277,8 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T]( dataReady = None - if (!waitingForElasticData) { + if (!waitingForElasticData) sendScrollScanRequest() - } - } case None => if (pullIsWaitingForData) throw new Exception("This should not happen: Downstream is pulling more than once") @@ -296,9 +287,8 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T]( if (!waitingForElasticData) { log.debug("Downstream is pulling data. We must go and get it") sendScrollScanRequest() - } else { + } else log.debug("Downstream is pulling data. Already waiting for data") - } } /** @@ -317,12 +307,12 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T]( * Complete the stage successfully, whether or not the clear call succeeds. * If the clear call fails, the scroll will eventually timeout. */ - def clearScrollAsync(): Unit = { + def clearScrollAsync(): Unit = scrollId match { case None => log.debug("Scroll Id is empty. Completing stage eagerly.") completeStage() - case Some(actualScrollId) => { + case Some(actualScrollId) => // Clear the scroll val uri = prepareUri(Path(s"/_search/scroll/$actualScrollId")) @@ -350,9 +340,7 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T]( .recover { case cause: Throwable => failureHandler.invoke(cause) } - } } - } private val clearScrollAsyncHandler = getAsyncCallback[Try[String]] { result => { diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApi.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApi.scala index e9db5c111..c7ef22f32 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApi.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApi.scala @@ -58,23 +58,21 @@ private[impl] abstract class RestBulkApi[T, C] { private def buildMessageResults(items: JsArray, messages: immutable.Seq[WriteMessage[T, C]]): immutable.Seq[WriteResult[T, C]] = { val ret = new immutable.VectorBuilder[WriteResult[T, C]] - ret.sizeHint(messages) val itemsIter = items.elements.iterator messages.foreach { message => - if (message.operation == Nop) { + if (message.operation == Nop) // client just wants to pass-through: ret += new WriteResult(message, None) - } else { + else { if (itemsIter.hasNext) { // good message val command = message.operation.command val res = itemsIter.next().asJsObject.fields(command).asJsObject val error: Option[String] = res.fields.get("error").map(_.compactPrint) ret += new WriteResult(message, error) - } else { + } else // error? 
ret += new WriteResult(message, None) - } } } ret.result() diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApiV5.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApiV5.scala index e4e5f2069..11763084d 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApiV5.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApiV5.scala @@ -74,11 +74,10 @@ private[impl] final class RestBulkApiV5[T, C](indexName: String, } override def constructSharedFields(message: WriteMessage[T, C]): Seq[(String, JsString)] = { - val operationFields = if (allowExplicitIndex) { + val operationFields = if (allowExplicitIndex) Seq("_index" -> JsString(message.indexName.getOrElse(indexName)), typeNameTuple) - } else { + else Seq(typeNameTuple) - } operationFields ++ message.customMetadata.map { case (field, value) => field -> JsString(value) } } diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApiV7.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApiV7.scala index 5e0c896c2..66171bb20 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApiV7.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApiV7.scala @@ -66,11 +66,10 @@ private[impl] final class RestBulkApiV7[T, C](indexName: String, } override def constructSharedFields(message: WriteMessage[T, C]): Seq[(String, JsString)] = { - val operationFields = if (allowExplicitIndex) { + val operationFields = if (allowExplicitIndex) Seq("_index" -> JsString(message.indexName.getOrElse(indexName))) - } else { + else Seq.empty - } operationFields ++ message.customMetadata.map { case (field, value) => field -> JsString(value) } } diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/javadsl/ElasticsearchSource.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/javadsl/ElasticsearchSource.scala index d3f2b7802..5bab8ba11 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/javadsl/ElasticsearchSource.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/javadsl/ElasticsearchSource.scala @@ -171,9 +171,9 @@ object ElasticsearchSource { val jsonTree = mapper.readTree(json) - if (jsonTree.has("error")) { + if (jsonTree.has("error")) impl.ScrollResponse(Some(jsonTree.get("error").asText()), None) - } else { + else { val scrollId = Option(jsonTree.get("_scroll_id")).map(_.asText()) val hits = jsonTree.get("hits").get("hits").asInstanceOf[ArrayNode] val messages = hits.elements().asScala.toList.map { element => diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchFlow.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchFlow.scala index 3c80a959e..fef7ff1d1 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchFlow.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchFlow.scala @@ -48,12 +48,11 @@ object ElasticsearchFlow { */ def create[T](elasticsearchParams: ElasticsearchParams, settings: 
WriteSettingsBase[_, _], - writer: MessageWriter[T]): Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] = { + writer: MessageWriter[T]): Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] = Flow[WriteMessage[T, NotUsed]] .batch(settings.bufferSize, immutable.Seq(_)) { case (seq, wm) => seq :+ wm } .via(stageFlow(elasticsearchParams, settings, writer)) .mapConcat(identity) - } /** * Create a flow to update Elasticsearch with [[pekko.stream.connectors.elasticsearch.WriteMessage WriteMessage]]s containing type `T` @@ -75,12 +74,11 @@ object ElasticsearchFlow { */ def createWithPassThrough[T, C](elasticsearchParams: ElasticsearchParams, settings: WriteSettingsBase[_, _], - writer: MessageWriter[T]): Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] = { + writer: MessageWriter[T]): Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] = Flow[WriteMessage[T, C]] .batch(settings.bufferSize, immutable.Seq(_)) { case (seq, wm) => seq :+ wm } .via(stageFlow(elasticsearchParams, settings, writer)) .mapConcat(identity) - } /** * Create a flow to update Elasticsearch with @@ -106,9 +104,8 @@ object ElasticsearchFlow { def createBulk[T, C]( elasticsearchParams: ElasticsearchParams, settings: WriteSettingsBase[_, _], - writer: MessageWriter[T]): Flow[immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], NotUsed] = { + writer: MessageWriter[T]): Flow[immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], NotUsed] = stageFlow(elasticsearchParams, settings, writer) - } /** * Create a flow to update Elasticsearch with [[pekko.stream.connectors.elasticsearch.WriteMessage WriteMessage]]s containing type `T` @@ -134,19 +131,18 @@ object ElasticsearchFlow { def createWithContext[T, C]( elasticsearchParams: ElasticsearchParams, settings: WriteSettingsBase[_, _], - writer: MessageWriter[T]): FlowWithContext[WriteMessage[T, NotUsed], C, WriteResult[T, C], C, NotUsed] = { + writer: MessageWriter[T]): FlowWithContext[WriteMessage[T, NotUsed], C, WriteResult[T, C], C, NotUsed] = Flow[WriteMessage[T, C]] .batch(settings.bufferSize, immutable.Seq(_)) { case (seq, wm) => seq :+ wm } .via(stageFlow(elasticsearchParams, settings, writer)) .mapConcat(identity) .asFlowWithContext[WriteMessage[T, NotUsed], C, C]((res, c) => res.withPassThrough(c))(p => p.message.passThrough) - } @InternalApi private def stageFlow[T, C]( elasticsearchParams: ElasticsearchParams, settings: WriteSettingsBase[_, _], - writer: MessageWriter[T]): Flow[immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], NotUsed] = { + writer: MessageWriter[T]): Flow[immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], NotUsed] = if (settings.retryLogic == RetryNever) { val basicFlow = basicStageFlow[T, C](elasticsearchParams, settings, writer) Flow[immutable.Seq[WriteMessage[T, C]]] @@ -177,11 +173,10 @@ object ElasticsearchFlow { .via(retryFlow) .via(applyOrderingFlow[T, C]) } - } @InternalApi private def amendWithIndexFlow[T, C]: Flow[immutable.Seq[WriteMessage[T, C]], (immutable.Seq[WriteMessage[T, (Int, - C)]], immutable.Seq[WriteResult[T, (Int, C)]]), NotUsed] = { + C)]], immutable.Seq[WriteResult[T, (Int, C)]]), NotUsed] = Flow[immutable.Seq[WriteMessage[T, C]]].map { messages => val indexedMessages = messages.zipWithIndex.map { case (m, idx) => @@ -189,11 +184,10 @@ object ElasticsearchFlow { } indexedMessages -> Nil } - } @InternalApi private def applyOrderingFlow[T, C] - : Flow[immutable.Seq[WriteResult[T, (Int, C)]], immutable.Seq[WriteResult[T, C]], NotUsed] = { + 
: Flow[immutable.Seq[WriteResult[T, (Int, C)]], immutable.Seq[WriteResult[T, C]], NotUsed] = Flow[immutable.Seq[WriteResult[T, (Int, C)]]].map { results => val orderedResults = results.sortBy(_.message.passThrough._1) val finalResults = orderedResults.map { r => @@ -201,12 +195,11 @@ object ElasticsearchFlow { } finalResults } - } @InternalApi private def basicStageFlow[T, C](elasticsearchParams: ElasticsearchParams, settings: WriteSettingsBase[_, _], - writer: MessageWriter[T]) = { + writer: MessageWriter[T]) = Flow .fromMaterializer { (mat, _) => implicit val system: ActorSystem = mat.system @@ -218,7 +211,6 @@ object ElasticsearchFlow { } } .mapMaterializedValue(_ => NotUsed) - } private final class SprayJsonWriter[T](implicit writer: JsonWriter[T]) extends MessageWriter[T] { override def convert(message: T): String = message.toJson.compactPrint diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchSource.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchSource.scala index 94e23a22c..636e17315 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchSource.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchSource.scala @@ -128,10 +128,9 @@ object ElasticsearchSource { override def convert(json: String): impl.ScrollResponse[T] = { val jsObj = json.parseJson.asJsObject jsObj.fields.get("error") match { - case Some(error) => { + case Some(error) => impl.ScrollResponse(Some(error.toString), None) - } - case None => { + case None => val scrollId = jsObj.fields.get("_scroll_id").map(v => v.asInstanceOf[JsString].value) val hits = jsObj.fields("hits").asJsObject.fields("hits").asInstanceOf[JsArray] val messages = hits.elements.map { element => @@ -143,7 +142,6 @@ object ElasticsearchSource { new ReadResult(id, source.convertTo[T], version) } impl.ScrollResponse(None, Some(impl.ScrollResult(scrollId, messages))) - } } } diff --git a/elasticsearch/src/test/java/docs/javadsl/ElasticsearchParameterizedTest.java b/elasticsearch/src/test/java/docs/javadsl/ElasticsearchParameterizedTest.java index 60a1ffda4..526d52ea2 100644 --- a/elasticsearch/src/test/java/docs/javadsl/ElasticsearchParameterizedTest.java +++ b/elasticsearch/src/test/java/docs/javadsl/ElasticsearchParameterizedTest.java @@ -31,8 +31,7 @@ import java.util.List; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.*; @RunWith(value = Parameterized.class) public class ElasticsearchParameterizedTest extends ElasticsearchTestBase { @@ -164,7 +163,7 @@ public void testUsingVersions() throws Exception { .get(0) .success(); - assertEquals(false, success); + assertFalse(success); } @Test diff --git a/elasticsearch/src/test/java/docs/javadsl/ElasticsearchV5Test.java b/elasticsearch/src/test/java/docs/javadsl/ElasticsearchV5Test.java index d719bb3d7..89b276bff 100644 --- a/elasticsearch/src/test/java/docs/javadsl/ElasticsearchV5Test.java +++ b/elasticsearch/src/test/java/docs/javadsl/ElasticsearchV5Test.java @@ -414,7 +414,8 @@ public void testUsingSearchParams() throws Exception { searchParams.put("query", "{\"match_all\": {}}"); searchParams.put("_source", "[\"id\", \"a\", \"c\"]"); - List result = + // These documents will only have property id, a and c (not + List result = ElasticsearchSource.typed( 
constructElasticsearchParams(indexName, typeName, ApiVersion.V5), searchParams, // <-- Using searchParams @@ -422,10 +423,7 @@ public void testUsingSearchParams() throws Exception { .withApiVersion(ApiVersion.V5), TestDoc.class, new ObjectMapper()) - .map( - o -> { - return o.source(); // These documents will only have property id, a and c (not - }) + .map(ReadResult::source) .runWith(Sink.seq(), system) .toCompletableFuture() .get(); @@ -435,10 +433,7 @@ public void testUsingSearchParams() throws Exception { assertEquals( docs.size(), result.stream() - .filter( - d -> { - return d.a != null && d.b == null; - }) + .filter(d -> d.a != null && d.b == null) .collect(Collectors.toList()) .size()); } diff --git a/elasticsearch/src/test/java/docs/javadsl/ElasticsearchV7Test.java b/elasticsearch/src/test/java/docs/javadsl/ElasticsearchV7Test.java index e27abda24..bd7c323cd 100644 --- a/elasticsearch/src/test/java/docs/javadsl/ElasticsearchV7Test.java +++ b/elasticsearch/src/test/java/docs/javadsl/ElasticsearchV7Test.java @@ -33,6 +33,7 @@ import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; public class ElasticsearchV7Test extends ElasticsearchTestBase { @BeforeClass @@ -184,7 +185,7 @@ public void flow() throws Exception { flushAndRefresh("sink3"); for (WriteResult aResult1 : result1) { - assertEquals(true, aResult1.success()); + assertTrue(aResult1.success()); } // Assert docs in sink3/book @@ -240,7 +241,7 @@ public void stringFlow() throws Exception { flushAndRefresh(indexName); for (WriteResult aResult1 : result1) { - assertEquals(true, aResult1.success()); + assertTrue(aResult1.success()); } CompletionStage> f2 = @@ -403,7 +404,8 @@ public void testUsingSearchParams() throws Exception { searchParams.put("query", "{\"match_all\": {}}"); searchParams.put("_source", "[\"id\", \"a\", \"c\"]"); - List result = + // These documents will only have property id, a and c (not + List result = ElasticsearchSource.typed( constructElasticsearchParams(indexName, typeName, ApiVersion.V7), searchParams, // <-- Using searchParams @@ -411,10 +413,7 @@ public void testUsingSearchParams() throws Exception { .withApiVersion(ApiVersion.V7), TestDoc.class, new ObjectMapper()) - .map( - o -> { - return o.source(); // These documents will only have property id, a and c (not - }) + .map(ReadResult::source) .runWith(Sink.seq(), system) .toCompletableFuture() .get(); @@ -424,10 +423,7 @@ public void testUsingSearchParams() throws Exception { assertEquals( docs.size(), result.stream() - .filter( - d -> { - return d.a != null && d.b == null; - }) + .filter(d -> d.a != null && d.b == null) .collect(Collectors.toList()) .size()); } diff --git a/elasticsearch/src/test/java/docs/javadsl/OpensearchParameterizedTest.java b/elasticsearch/src/test/java/docs/javadsl/OpensearchParameterizedTest.java index 9eac31305..174db675f 100644 --- a/elasticsearch/src/test/java/docs/javadsl/OpensearchParameterizedTest.java +++ b/elasticsearch/src/test/java/docs/javadsl/OpensearchParameterizedTest.java @@ -31,8 +31,7 @@ import java.util.List; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.*; @RunWith(value = Parameterized.class) public class OpensearchParameterizedTest extends ElasticsearchTestBase { @@ -159,7 +158,7 @@ public void testUsingVersions() throws Exception { .get(0) .success(); - assertEquals(false, success); + assertFalse(success); } @Test diff --git 
a/elasticsearch/src/test/java/docs/javadsl/OpensearchV1Test.java b/elasticsearch/src/test/java/docs/javadsl/OpensearchV1Test.java index e6cfb6c34..1d2dc4314 100644 --- a/elasticsearch/src/test/java/docs/javadsl/OpensearchV1Test.java +++ b/elasticsearch/src/test/java/docs/javadsl/OpensearchV1Test.java @@ -33,6 +33,7 @@ import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; public class OpensearchV1Test extends ElasticsearchTestBase { @BeforeClass @@ -188,7 +189,7 @@ public void flow() throws Exception { flushAndRefresh("sink3"); for (WriteResult aResult1 : result1) { - assertEquals(true, aResult1.success()); + assertTrue(aResult1.success()); } // Assert docs in sink3/book @@ -246,7 +247,7 @@ public void stringFlow() throws Exception { flushAndRefresh(indexName); for (WriteResult aResult1 : result1) { - assertEquals(true, aResult1.success()); + assertTrue(aResult1.success()); } CompletionStage> f2 = @@ -414,7 +415,8 @@ public void testUsingSearchParams() throws Exception { searchParams.put("query", "{\"match_all\": {}}"); searchParams.put("_source", "[\"id\", \"a\", \"c\"]"); - List result = + // These documents will only have property id, a and c (not + List result = ElasticsearchSource.typed( constructElasticsearchParams(indexName, typeName, OpensearchApiVersion.V1), searchParams, // <-- Using searchParams @@ -422,10 +424,7 @@ public void testUsingSearchParams() throws Exception { .withApiVersion(OpensearchApiVersion.V1), TestDoc.class, new ObjectMapper()) - .map( - o -> { - return o.source(); // These documents will only have property id, a and c (not - }) + .map(ReadResult::source) .runWith(Sink.seq(), system) .toCompletableFuture() .get(); @@ -435,10 +434,7 @@ public void testUsingSearchParams() throws Exception { assertEquals( docs.size(), result.stream() - .filter( - d -> { - return d.a != null && d.b == null; - }) + .filter(d -> d.a != null && d.b == null) .collect(Collectors.toList()) .size()); } diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala index 9f360a90d..beeb5b0b7 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala @@ -452,7 +452,7 @@ trait ElasticsearchConnectorBehaviour { "read and write document-version if configured to do so" in { - case class VersionTestDoc(id: String, name: String, value: Int) + final case class VersionTestDoc(id: String, name: String, value: Int) implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc.apply) val indexName = "version-test-scala" diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala index 2c67de274..8bd99295e 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala @@ -94,15 +94,13 @@ trait ElasticsearchSpecUtils { this: AnyWordSpec with ScalaFutures => def constructElasticsearchParams(indexName: String, typeName: String, - apiVersion: ApiVersionBase): ElasticsearchParams = { - if (apiVersion == pekko.stream.connectors.elasticsearch.ApiVersion.V5) { + apiVersion: ApiVersionBase): ElasticsearchParams = + if (apiVersion == pekko.stream.connectors.elasticsearch.ApiVersion.V5) 
ElasticsearchParams.V5(indexName, typeName) - } else if (apiVersion == pekko.stream.connectors.elasticsearch.ApiVersion.V7) { + else if (apiVersion == pekko.stream.connectors.elasticsearch.ApiVersion.V7) ElasticsearchParams.V7(indexName) - } else if (apiVersion == OpensearchApiVersion.V1) { + else if (apiVersion == OpensearchApiVersion.V1) OpensearchParams.V1(indexName) - } else { + else throw new IllegalArgumentException(s"API version $apiVersion is not supported") - } - } } diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala index 7bc0bb25b..23b5b8c12 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala @@ -43,11 +43,9 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt private val baseSourceSettings = ElasticsearchSourceSettings(connectionSettings).withApiVersion(ApiVersion.V5) private val baseWriteSettings = ElasticsearchWriteSettings(connectionSettings).withApiVersion(ApiVersion.V5) - override protected def beforeAll(): Unit = { - insertTestData(connectionSettings) - } + override protected def beforeAll(): Unit = insertTestData(connectionSettings) - override def afterAll() = { + override def afterAll(): Unit = { val deleteRequest = HttpRequest(HttpMethods.DELETE) .withUri(Uri(connectionSettings.baseUrl).withPath(Path("/_all"))) http.singleRequest(deleteRequest).futureValue @@ -185,8 +183,8 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1"), KafkaOffset(0)), @@ -236,8 +234,8 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1"), KafkaOffset(0)), @@ -288,8 +286,8 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book A", shouldSkip = Some(true)), KafkaOffset(0)), @@ -346,8 +344,8 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1", shouldSkip = Some(true)), KafkaOffset(0)), @@ -477,7 +475,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt import 
DefaultJsonProtocol._ // #custom-search-params - case class TestDoc(id: String, a: String, b: Option[String], c: String) + final case class TestDoc(id: String, a: String, b: Option[String], c: String) // #custom-search-params implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc.apply) diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala index bccd5afeb..00413f820 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala @@ -35,11 +35,9 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt private val baseSourceSettings = ElasticsearchSourceSettings(connectionSettings).withApiVersion(ApiVersion.V7) private val baseWriteSettings = ElasticsearchWriteSettings(connectionSettings).withApiVersion(ApiVersion.V7) - override protected def beforeAll() = { - insertTestData(connectionSettings) - } + override protected def beforeAll(): Unit = insertTestData(connectionSettings) - override def afterAll() = { + override def afterAll(): Unit = { val deleteRequest = HttpRequest(HttpMethods.DELETE) .withUri(Uri(connectionSettings.baseUrl).withPath(Path("/_all"))) http.singleRequest(deleteRequest).futureValue @@ -172,8 +170,8 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1"), KafkaOffset(0)), @@ -222,8 +220,8 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1"), KafkaOffset(0)), @@ -273,8 +271,8 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book A", shouldSkip = Some(true)), KafkaOffset(0)), @@ -330,8 +328,8 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1", shouldSkip = Some(true)), KafkaOffset(0)), @@ -456,7 +454,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt import spray.json._ import DefaultJsonProtocol._ - case class TestDoc(id: String, a: String, b: Option[String], c: String) + final case class TestDoc(id: String, a: String, b: 
Option[String], c: String) implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc.apply) diff --git a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala index 1c235685a..0598bd4c5 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala @@ -452,7 +452,7 @@ trait OpensearchConnectorBehaviour { "read and write document-version if configured to do so" in { - case class VersionTestDoc(id: String, name: String, value: Int) + final case class VersionTestDoc(id: String, name: String, value: Int) implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc.apply) val indexName = "version-test-scala" diff --git a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala index 81a093399..69ca05f19 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala @@ -43,11 +43,9 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils private val baseSourceSettings = OpensearchSourceSettings(connectionSettings).withApiVersion(OpensearchApiVersion.V1) private val baseWriteSettings = OpensearchWriteSettings(connectionSettings).withApiVersion(OpensearchApiVersion.V1) - override protected def beforeAll() = { - insertTestData(connectionSettings) - } + override protected def beforeAll(): Unit = insertTestData(connectionSettings) - override def afterAll() = { + override def afterAll(): Unit = { val deleteRequest = HttpRequest(HttpMethods.DELETE) .withUri(Uri(connectionSettings.baseUrl).withPath(Path("/_all"))) http.singleRequest(deleteRequest).futureValue @@ -190,8 +188,8 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1"), KafkaOffset(0)), @@ -240,8 +238,8 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1"), KafkaOffset(0)), @@ -291,8 +289,8 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils // After we've written them to Elastic, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book A", shouldSkip = Some(true)), KafkaOffset(0)), @@ -348,8 +346,8 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils // After we've written them to Elastic, we want // to commit the offset to Kafka - case class 
KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1", shouldSkip = Some(true)), KafkaOffset(0)), @@ -478,7 +476,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils import spray.json._ import DefaultJsonProtocol._ - case class TestDoc(id: String, a: String, b: Option[String], c: String) + final case class TestDoc(id: String, a: String, b: Option[String], c: String) implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc.apply) diff --git a/file/src/main/java/org/apache/pekko/stream/connectors/file/javadsl/Directory.java b/file/src/main/java/org/apache/pekko/stream/connectors/file/javadsl/Directory.java index 518ed3d6a..cfefcd925 100644 --- a/file/src/main/java/org/apache/pekko/stream/connectors/file/javadsl/Directory.java +++ b/file/src/main/java/org/apache/pekko/stream/connectors/file/javadsl/Directory.java @@ -38,7 +38,7 @@ public static Source walk(Path directory) { /** * Recursively list files and directories in the given directory, depth first, with a maximum * directory depth limit and a possibly set of options (See {@link java.nio.file.Files#walk} for - * details. + * details). */ public static Source walk( Path directory, int maxDepth, FileVisitOption... options) { diff --git a/file/src/main/java/org/apache/pekko/stream/connectors/file/javadsl/FileTailSource.java b/file/src/main/java/org/apache/pekko/stream/connectors/file/javadsl/FileTailSource.java index c8da29ee1..244979839 100644 --- a/file/src/main/java/org/apache/pekko/stream/connectors/file/javadsl/FileTailSource.java +++ b/file/src/main/java/org/apache/pekko/stream/connectors/file/javadsl/FileTailSource.java @@ -90,7 +90,7 @@ public static Source createLines( path, maxChunkSize, pollingInterval, - System.getProperty("line.separator"), + System.lineSeparator(), StandardCharsets.UTF_8); } } diff --git a/file/src/main/mima-filters/1.1.x.backwards.excludes/LogRotatorSink-missing-private-methods.backwards.excludes b/file/src/main/mima-filters/1.1.x.backwards.excludes/LogRotatorSink-missing-private-methods.backwards.excludes new file mode 100644 index 000000000..b935f02d0 --- /dev/null +++ b/file/src/main/mima-filters/1.1.x.backwards.excludes/LogRotatorSink-missing-private-methods.backwards.excludes @@ -0,0 +1,11 @@ +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink.in") +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.triggerGenerator") +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.sourceOut") +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.sourceOut_=") +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.sinkCompletions") +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.sinkCompletions_=") +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.isFinishing") 
+ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.isFinishing_=") +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.failThisStage") +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.completeThisStage") +ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.stream.connectors.file.scaladsl.LogRotatorSink#Logic.checkTrigger") diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/EnsureByteStreamSize.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/EnsureByteStreamSize.scala index fe752a0c2..770e0c25d 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/EnsureByteStreamSize.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/EnsureByteStreamSize.scala @@ -25,10 +25,10 @@ import pekko.util.ByteString @InternalApi private[file] class EnsureByteStreamSize(expectedSize: Long) extends GraphStage[FlowShape[ByteString, ByteString]] { - val in = Inlet[ByteString]("EnsureByteStreamSize.in") - val out = Outlet[ByteString]("EnsureByteStreamSize.out") + val in: Inlet[ByteString] = Inlet[ByteString]("EnsureByteStreamSize.in") + val out: Outlet[ByteString] = Outlet[ByteString]("EnsureByteStreamSize.out") - override val shape = FlowShape.of(in, out) + override val shape: FlowShape[ByteString, ByteString] = FlowShape.of(in, out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private var currentSize = 0L @@ -42,16 +42,14 @@ import pekko.util.ByteString push(out, elem) } - override def onUpstreamFinish(): Unit = { + override def onUpstreamFinish(): Unit = if (currentSize == expectedSize) super.onUpstreamFinish() - else failStage(new IllegalStateException(s"Expected ${expectedSize} bytes but got ${currentSize} bytes")) - } + else failStage(new IllegalStateException(s"Expected $expectedSize bytes but got $currentSize bytes")) }) setHandler(out, new OutHandler { - override def onPull(): Unit = { + override def onPull(): Unit = pull(in) - } }) } diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/FileByteStringSeparators.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/FileByteStringSeparators.scala index b4cd02be0..72bfb4139 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/FileByteStringSeparators.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/FileByteStringSeparators.scala @@ -42,12 +42,11 @@ import pekko.util.ByteString def getPathFromStartingByteString(b: ByteString): String = { val splitted = b.utf8String.split(separator) - if (splitted.length == 1) { + if (splitted.length == 1) "" - } else if (splitted.length == 2) { + else if (splitted.length == 2) splitted.tail.head - } else { + else splitted.tail.mkString(separator.toString) - } } } diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarArchiveEntry.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarArchiveEntry.scala index 2ff3bde4d..982ae012f 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarArchiveEntry.scala +++ 
b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarArchiveEntry.scala @@ -81,10 +81,9 @@ import pekko.util.ByteString groupIdLength) } - private val fixedData2 = { + private val fixedData2 = // [148, 156) ByteString(" ") // headerChecksumLength - } // [156, 157) // linkIndicatorLength @@ -116,9 +115,8 @@ import pekko.util.ByteString else bytes } - private def empty(size: Int) = { + private def empty(size: Int) = ByteString.fromArrayUnsafe(new Array[Byte](size)) - } def parse(bs: ByteString): TarArchiveMetadata = { require(bs.length >= headerLength, s"the tar archive header is expected to be at least 512 bytes") diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarArchiveManager.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarArchiveManager.scala index b008abe50..494c9707c 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarArchiveManager.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarArchiveManager.scala @@ -25,7 +25,7 @@ import pekko.util.ByteString */ @InternalApi private[file] object TarArchiveManager { - def tarFlow(): Flow[(TarArchiveMetadata, Source[ByteString, _]), ByteString, NotUsed] = { + def tarFlow(): Flow[(TarArchiveMetadata, Source[ByteString, _]), ByteString, NotUsed] = Flow[(TarArchiveMetadata, Source[ByteString, Any])] .flatMapConcat { case (metadata, stream) => @@ -35,6 +35,5 @@ import pekko.util.ByteString .concat(stream.via(new EnsureByteStreamSize(metadata.size))) .concat(Source.single(entry.trailingBytes)) } - } } diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarReaderStage.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarReaderStage.scala index b6239f3e9..a306c3d8c 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarReaderStage.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/TarReaderStage.scala @@ -48,24 +48,22 @@ private[file] class TarReaderStage setHandlers(flowIn, flowOut, new CollectHeader(ByteString.empty)) - def readHeader(buffer: ByteString): Unit = { - if (buffer.length >= TarArchiveEntry.headerLength) { + def readHeader(buffer: ByteString): Unit = + if (buffer.length >= TarArchiveEntry.headerLength) readFile(buffer) - } else { + else { tryPullIfNeeded() setHandlers(flowIn, flowOut, new CollectHeader(buffer)) } - } def readFile(headerBuffer: ByteString): Unit = { - def pushSource(metadata: TarArchiveMetadata, buffer: ByteString): Unit = { + def pushSource(metadata: TarArchiveMetadata, buffer: ByteString): Unit = if (buffer.length >= metadata.size) { val (emit, remain) = buffer.splitAt(metadata.size.toInt) log.debug("emitting completed source for [{}]", metadata) push(flowOut, metadata -> Source.single(emit)) readTrailer(metadata, remain, subSource = None) } else setHandlers(flowIn, flowOut, new CollectFile(metadata, buffer)) - } if (headerBuffer.head == 0) { log.debug("empty filename, detected EOF padding, completing") @@ -74,11 +72,10 @@ private[file] class TarReaderStage } else { val metadata = TarArchiveEntry.parse(headerBuffer) val buffer = headerBuffer.drop(TarArchiveEntry.headerLength) - if (isAvailable(flowOut)) { + if (isAvailable(flowOut)) pushSource(metadata, buffer) - } else { + else setHandlers(flowIn, flowOut, new PushSourceOnPull(metadata, buffer)) - } } final class PushSourceOnPull(metadata: TarArchiveMetadata, buffer: 
ByteString) @@ -106,7 +103,7 @@ private[file] class TarReaderStage } else setHandlers(flowIn, flowOut, new ReadPastTrailer(metadata, buffer, subSource)) } - override protected def onTimer(timerKey: Any): Unit = { + override protected def onTimer(timerKey: Any): Unit = timerKey match { case SubscriptionTimeout(subSource) => import StreamSubscriptionTimeoutTerminationMode._ @@ -131,12 +128,10 @@ private[file] class TarReaderStage case other => log.warning("unexpected timer [{}]", other) } - } - private def tryPullIfNeeded(): Unit = { + private def tryPullIfNeeded(): Unit = if (!hasBeenPulled(flowIn)) tryPull(flowIn) - } /** * Don't react on downstream pulls until we have something to push. @@ -164,11 +159,10 @@ private[file] class TarReaderStage override def onPush(): Unit = { buffer ++= grab(flowIn) - if (buffer.length >= TarArchiveEntry.headerLength) { + if (buffer.length >= TarArchiveEntry.headerLength) readFile(buffer) - } else { + else tryPullIfNeeded() - } } override def onUpstreamFinish(): Unit = { @@ -191,17 +185,14 @@ private[file] class TarReaderStage private val subSource: FileOutSubSource = { val sub = new FileOutSubSource() val timeoutSignal = SubscriptionTimeout(sub) - sub.setHandler(new OutHandler { - override def onPull(): Unit = { - cancelTimer(timeoutSignal) - if (buffer.nonEmpty) { - subPush(buffer) - buffer = ByteString.empty - if (isClosed(flowIn)) onUpstreamFinish() - } else { - tryPullIfNeeded() - } - } + sub.setHandler(() => { + cancelTimer(timeoutSignal) + if (buffer.nonEmpty) { + subPush(buffer) + buffer = ByteString.empty + if (isClosed(flowIn)) onUpstreamFinish() + } else + tryPullIfNeeded() }) val timeout = attributes.get[ActorAttributes.StreamSubscriptionTimeout].get.timeout scheduleOnce(timeoutSignal, timeout) @@ -224,17 +215,15 @@ private[file] class TarReaderStage } } - override def onPush(): Unit = { + override def onPush(): Unit = subPush(grab(flowIn)) - } - override def onUpstreamFinish(): Unit = { - if (buffer.isEmpty) { + override def onUpstreamFinish(): Unit = + if (buffer.isEmpty) failStage( new TarReaderException( s"incomplete tar file contents for [${metadata.filePath}] expected ${metadata.size} bytes, received $emitted bytes")) - } else setKeepGoing(true) - } + else setKeepGoing(true) } @@ -259,24 +248,22 @@ private[file] class TarReaderStage tryPullIfNeeded() } readHeader(buffer.drop(trailerLength)) - } else { + } else tryPullIfNeeded() - } } - override def onUpstreamFinish(): Unit = { + override def onUpstreamFinish(): Unit = if (buffer.length == trailerLength) completeStage() else failStage( new TarReaderException( - s"incomplete tar file trailer for [${metadata.filePath}] expected ${trailerLength} bytes, received ${buffer.length} bytes")) - } + s"incomplete tar file trailer for [${metadata.filePath}] expected $trailerLength bytes, received ${buffer.length} bytes")) } /** * "At the end of the archive file there are two 512-byte blocks filled with binary zeros as an end-of-file marker." 
*/ - private final class FlushEndOfFilePadding() extends InHandler with IgnoreDownstreamPull { + private final class FlushEndOfFilePadding extends InHandler with IgnoreDownstreamPull { override def onPush(): Unit = { grab(flowIn) diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/ZipArchiveFlow.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/ZipArchiveFlow.scala index 0e5077fde..e299e542e 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/ZipArchiveFlow.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/ZipArchiveFlow.scala @@ -42,9 +42,8 @@ import pekko.util.{ ByteString, ByteStringBuilder } if (isClosed(in)) { emptyStream = true completeStage() - } else { + } else pull(in) - } }) setHandler( @@ -68,9 +67,8 @@ import pekko.util.{ ByteString, ByteStringBuilder } if (result.nonEmpty) { builder.clear() push(out, result) - } else { + } else pull(in) - } } override def onUpstreamFinish(): Unit = { diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/ZipReaderSource.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/ZipReaderSource.scala index ece284b43..1614e5a61 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/ZipReaderSource.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/impl/archive/ZipReaderSource.scala @@ -35,17 +35,16 @@ import java.util.zip.{ ZipEntry, ZipInputStream } override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { val zis = new ZipInputStream(new FileInputStream(f), fileCharset) - var entry: ZipEntry = null + var entry: ZipEntry = _ val data = new Array[Byte](chunkSize) - def seek() = { + def seek(): Unit = while ({ - entry = zis.getNextEntry() + entry = zis.getNextEntry entry != null && entry.getName != n.name }) { zis.closeEntry() } - } setHandler( out, @@ -53,17 +52,15 @@ import java.util.zip.{ ZipEntry, ZipInputStream } override def onPull(): Unit = { if (entry == null) { seek() - if (entry == null) { + if (entry == null) failStage(new Exception("After a seek the part is not found")) - } } val c = zis.read(data, 0, chunkSize) - if (c == -1) { + if (c == -1) completeStage() - } else { + else push(out, ByteString.fromArray(data, 0, c)) - } } }) diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/Archive.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/Archive.scala index 16e9d5b78..b01338ccd 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/Archive.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/Archive.scala @@ -85,7 +85,5 @@ object Archive { Pair(metadata, source.asJava) }) - private def func[T, R](f: T => R) = new pekko.japi.function.Function[T, R] { - override def apply(param: T): R = f(param) - } + private def func[T, R](f: T => R): pekko.japi.function.Function[T, R] = (param: T) => f(param) } diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala index 75f0ffe74..e5f08d22d 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala @@ -55,7 +55,6 @@ object LogRotatorSink { def 
createFromFunctionAndOptions( triggerGeneratorCreator: function.Creator[function.Function[ByteString, Optional[Path]]], fileOpenOptions: java.util.Set[StandardOpenOption]): javadsl.Sink[ByteString, CompletionStage[Done]] = { - val logRotatorSink = new scaladsl.SinkToCompletionStage[ByteString, Done](pekko.stream.connectors.file.scaladsl .LogRotatorSink(asScala(triggerGeneratorCreator), fileOpenOptions.asScala.toSet)) new javadsl.Sink(logRotatorSink.toCompletionStage()) diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/model.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/model.scala index 844416179..c6172dad3 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/model.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/model.scala @@ -26,7 +26,7 @@ object ArchiveMetadata { } final case class ZipArchiveMetadata(name: String) { - def getName() = name + def getName(): String = name } object ZipArchiveMetadata { def create(name: String): ZipArchiveMetadata = ZipArchiveMetadata(name) @@ -41,7 +41,7 @@ final class TarArchiveMetadata private ( * See constants `TarchiveMetadata.linkIndicatorNormal` */ val linkIndicatorByte: Byte) { - val filePath = filePathPrefix match { + val filePath: String = filePathPrefix match { case None => filePathName case Some(prefix) => prefix + "/" + filePathName } @@ -90,20 +90,19 @@ object TarArchiveMetadata { def apply(filePath: String, size: Long): TarArchiveMetadata = apply(filePath, size, Instant.now) def apply(filePath: String, size: Long, lastModification: Instant): TarArchiveMetadata = { val filePathSegments = filePath.lastIndexOf("/") - val filePathPrefix = if (filePathSegments > 0) { + val filePathPrefix = if (filePathSegments > 0) Some(filePath.substring(0, filePathSegments)) - } else None + else None val filePathName = filePath.substring(filePathSegments + 1, filePath.length) apply(filePathPrefix, filePathName, size, lastModification, linkIndicatorNormal) } - def apply(filePathPrefix: String, filePathName: String, size: Long, lastModification: Instant): TarArchiveMetadata = { + def apply(filePathPrefix: String, filePathName: String, size: Long, lastModification: Instant): TarArchiveMetadata = apply(if (filePathPrefix.isEmpty) None else Some(filePathPrefix), filePathName, size, lastModification, linkIndicatorNormal) - } /** * @param linkIndicatorByte See constants eg. 
`TarchiveMetadata.linkIndicatorNormal` @@ -112,13 +111,12 @@ object TarArchiveMetadata { filePathName: String, size: Long, lastModification: Instant, - linkIndicatorByte: Byte): TarArchiveMetadata = { + linkIndicatorByte: Byte): TarArchiveMetadata = apply(if (filePathPrefix.isEmpty) None else Some(filePathPrefix), filePathName, size, lastModification, linkIndicatorByte) - } private def apply(filePathPrefix: Option[String], filePathName: String, diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/FileTailSource.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/FileTailSource.scala index fc8e2763d..19e420e24 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/FileTailSource.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/FileTailSource.scala @@ -64,7 +64,7 @@ object FileTailSource { def lines(path: Path, maxLineSize: Int, pollingInterval: FiniteDuration, - lf: String = System.getProperty("line.separator"), + lf: String = java.lang.System.lineSeparator(), charset: Charset = StandardCharsets.UTF_8): Source[String, NotUsed] = apply(path, maxLineSize, 0, pollingInterval) .via(pekko.stream.scaladsl.Framing.delimiter(ByteString.fromString(lf, charset.name), maxLineSize, diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala index fcb19d0b0..e398205ff 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala @@ -86,8 +86,8 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => sinkFactory: C => Sink[T, Future[R]]) extends GraphStageWithMaterializedValue[SinkShape[T], Future[Done]] { - val in = Inlet[T]("LogRotatorSink.in") - override val shape = SinkShape.of(in) + private val in: Inlet[T] = Inlet[T]("LogRotatorSink.in") + override val shape: SinkShape[T] = SinkShape.of(in) override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = { val promise = Promise[Done]() @@ -96,29 +96,27 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => } private final class Logic(promise: Promise[Done]) extends GraphStageLogic(shape) { - val triggerGenerator: T => Option[C] = triggerGeneratorCreator() - var sourceOut: SubSourceOutlet[T] = _ - var sinkCompletions: immutable.Seq[Future[R]] = immutable.Seq.empty - var isFinishing = false + private val triggerGenerator: T => Option[C] = triggerGeneratorCreator() + private var sourceOut: SubSourceOutlet[T] = _ + private var sinkCompletions: immutable.Seq[Future[R]] = immutable.Seq.empty + private var isFinishing = false - def failThisStage(ex: Throwable): Unit = + private def failThisStage(ex: Throwable): Unit = if (!promise.isCompleted) { - if (sourceOut != null) { + if (sourceOut != null) sourceOut.fail(ex) - } cancel(in) promise.failure(ex) } - def completeThisStage() = { - if (sourceOut != null) { + private def completeThisStage() = { + if (sourceOut != null) sourceOut.complete() - } implicit val executionContext: ExecutionContext = pekko.dispatch.ExecutionContexts.parasitic promise.completeWith(Future.sequence(sinkCompletions).map(_ => Done)) } - def checkTrigger(data: T): Option[C] = + private def checkTrigger(data: T): Option[C] = try { triggerGenerator(data) } 
catch { @@ -173,7 +171,7 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => .map(_ => Done)(pekko.dispatch.ExecutionContexts.parasitic) } - def futureCB(newFuture: Future[R]) = + def futureCB(newFuture: Future[R]): AsyncCallback[Holder[R]] = getAsyncCallback[Holder[R]](sinkCompletionCallbackHandler(newFuture)) // we recreate the tail of the stream, and emit the data for the next req @@ -181,11 +179,9 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => val prevOut = Option(sourceOut) sourceOut = new SubSourceOutlet[T]("LogRotatorSink.sub-out") - sourceOut.setHandler(new OutHandler { - override def onPull(): Unit = { - sourceOut.push(data) - switchToNormalMode() - } + sourceOut.setHandler(() => { + sourceOut.push(data) + switchToNormalMode() }) setHandler(in, rotateInHandler) val newFuture = Source @@ -202,23 +198,18 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => } // we change path if needed or push the grabbed data - def switchToNormalMode(): Unit = { - if (isFinishing) { + def switchToNormalMode(): Unit = + if (isFinishing) completeThisStage() - } else { + else { setHandler(in, normalModeInHandler) - sourceOut.setHandler(new OutHandler { - override def onPull(): Unit = - pull(in) - }) + sourceOut.setHandler(() => pull(in)) } - } - val rotateInHandler = + val rotateInHandler: InHandler = new InHandler { - override def onPush(): Unit = { + override def onPush(): Unit = require(requirement = false, "No push should happen while we are waiting for the substream to grab the dangling data!") - } override def onUpstreamFinish(): Unit = { setKeepGoing(true) isFinishing = true @@ -226,7 +217,7 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => override def onUpstreamFailure(ex: Throwable): Unit = failThisStage(ex) } - val normalModeInHandler = new InHandler { + val normalModeInHandler: InHandler = new InHandler { override def onPush(): Unit = { val data = grab(in) checkTrigger(data) match { @@ -235,9 +226,8 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => } } - override def onUpstreamFinish(): Unit = { + override def onUpstreamFinish(): Unit = completeThisStage() - } override def onUpstreamFailure(ex: Throwable): Unit = failThisStage(ex) diff --git a/file/src/test/java/docs/javadsl/NestedTarReaderTest.java b/file/src/test/java/docs/javadsl/NestedTarReaderTest.java index 50a41c463..278f852c1 100644 --- a/file/src/test/java/docs/javadsl/NestedTarReaderTest.java +++ b/file/src/test/java/docs/javadsl/NestedTarReaderTest.java @@ -73,7 +73,7 @@ public void flowShouldCreateZIPArchive() throws Exception { List metadata = process(file, tempDir, system).toCompletableFuture().get(1, TimeUnit.MINUTES); List names = - metadata.stream().map(md -> md.filePathName()).collect(Collectors.toList()); + metadata.stream().map(TarArchiveMetadata::filePathName).collect(Collectors.toList()); assertThat(names.size(), is(1281)); } diff --git a/file/src/test/scala/docs/scaladsl/DirectorySpec.scala b/file/src/test/scala/docs/scaladsl/DirectorySpec.scala index dd3a7f986..eb992694a 100644 --- a/file/src/test/scala/docs/scaladsl/DirectorySpec.scala +++ b/file/src/test/scala/docs/scaladsl/DirectorySpec.scala @@ -135,4 +135,4 @@ class DirectorySpec fs.close() } -case class SomeContext() +final case class SomeContext() diff --git a/file/src/test/scala/docs/scaladsl/ExecutableUtils.scala b/file/src/test/scala/docs/scaladsl/ExecutableUtils.scala index aad744a4b..f237eebb5 
100644 --- a/file/src/test/scala/docs/scaladsl/ExecutableUtils.scala +++ b/file/src/test/scala/docs/scaladsl/ExecutableUtils.scala @@ -29,7 +29,7 @@ object ExecutableUtils { paths.exists(path => Files.isExecutable(path.resolve(bin))) } - def run(bin: String, args: Seq[String], cwd: Path, input: ByteString = ByteString.empty): Future[ByteString] = { + def run(bin: String, args: Seq[String], cwd: Path, input: ByteString = ByteString.empty): Future[ByteString] = Future { val proc = Process(Seq(bin) ++ args, cwd.toFile) var stdout = Option.empty[ByteString] @@ -50,7 +50,6 @@ object ExecutableUtils { case code => throw new RuntimeException(s"Subprocess exited with code $code\n\n${stderr.get.utf8String}") } }(scala.concurrent.ExecutionContext.Implicits.global) - } private def writeStream(stream: OutputStream, content: ByteString): Unit = { try stream.write(content.toArray) diff --git a/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala b/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala index 24f744e83..ceff5154b 100644 --- a/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala +++ b/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala @@ -95,7 +95,7 @@ class LogRotatorSinkSpec "complete when consuming an empty source" in assertAllStagesStopped { val triggerCreator: () => ByteString => Option[Path] = () => { - (element: ByteString) => fail("trigger creator should not be called") + (_: ByteString) => fail("trigger creator should not be called") } val rotatorSink: Sink[ByteString, Future[Done]] = @@ -143,9 +143,9 @@ class LogRotatorSinkSpec var currentFilename: Option[String] = None (_: ByteString) => { val newName = LocalDateTime.now().format(formatter) - if (currentFilename.contains(newName)) { + if (currentFilename.contains(newName)) None - } else { + else { currentFilename = Some(newName) Some(destinationDir.resolve(newName)) } @@ -185,9 +185,9 @@ class LogRotatorSinkSpec val streamBasedTriggerCreator: () => ((String, String)) => Option[Path] = () => { var currentFilename: Option[String] = None (element: (String, String)) => { - if (currentFilename.contains(element._1)) { + if (currentFilename.contains(element._1)) None - } else { + else { currentFilename = Some(element._1) Some(destinationDir.resolve(element._1)) } @@ -221,15 +221,14 @@ class LogRotatorSinkSpec var files = Seq.empty[Path] val triggerFunctionCreator = () => { var fileName: String = null - (element: ByteString) => { + (_: ByteString) => { if (fileName == null) { val path = Files.createTempFile(fs.getPath("/"), "test", ".log") files :+= path fileName = path.toString Some(path) - } else { + } else None - } } } val completion = Source(testByteStrings).runWith(LogRotatorSink(triggerFunctionCreator)) @@ -255,9 +254,8 @@ class LogRotatorSinkSpec "correctly close sinks" in assertAllStagesStopped { val test = (1 to 3).map(_.toString).toList var out = Seq.empty[String] - def add(e: ByteString): Unit = { + def add(e: ByteString): Unit = out = out :+ e.utf8String - } val completion = Source(test.map(ByteString.apply)).runWith( diff --git a/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala b/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala index 97bbb8577..5399a840d 100644 --- a/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala +++ b/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala @@ -148,12 +148,11 @@ class TarArchiveSpec val tenDigits = ByteString("1234567890") val metadata1 = TarArchiveMetadata("dir/file1.txt", tenDigits.length) - val oneFileArchive = { + val oneFileArchive = Source 
.single(metadata1 -> Source.single(tenDigits)) .via(Archive.tar()) .runWith(collectByteString) - } "emit one file" in { val tar = @@ -161,7 +160,7 @@ class TarArchiveSpec .future(oneFileArchive) .via(Archive.tarReader()) .mapAsync(1) { - case in @ (metadata, source) => + case (metadata, source) => source.runWith(collectByteString).map { bs => metadata -> bs } @@ -295,8 +294,7 @@ class TarArchiveSpec .future(input) .via(Archive.tarReader()) .mapAsync(1) { - case (metadata, source) => - source.runWith(Sink.ignore) + case (_, source) => source.runWith(Sink.ignore) } .runWith(Sink.ignore) val error = tar.failed.futureValue @@ -345,12 +343,12 @@ class TarArchiveSpec val tenDigits = ByteString("1234567890") val metadata1 = TarArchiveMetadata("dir/file1.txt", tenDigits.length) - val nestedArchive = { + val nestedArchive = Source .single(metadata1 -> Source.single(tenDigits)) .via(Archive.tar()) .runWith(collectByteString) - } + val outerArchive: Future[ByteString] = Source .future(nestedArchive) @@ -368,7 +366,7 @@ class TarArchiveSpec res.futureValue shouldBe Seq("nested.tar", "file1.txt") } - def untar(): Flow[ByteString, TarArchiveMetadata, NotUsed] = { + def untar(): Flow[ByteString, TarArchiveMetadata, NotUsed] = Archive .tarReader() .log("untar") @@ -385,7 +383,6 @@ class TarArchiveSpec } .flatMapConcat(identity) .log("untarred") - } } private def getPathFromResources(fileName: String): Path = diff --git a/ftp/src/main/java/org/apache/pekko/stream/connectors/ftp/impl/LegacyFtpsClient.java b/ftp/src/main/java/org/apache/pekko/stream/connectors/ftp/impl/LegacyFtpsClient.java index db82e6f84..474277eb9 100644 --- a/ftp/src/main/java/org/apache/pekko/stream/connectors/ftp/impl/LegacyFtpsClient.java +++ b/ftp/src/main/java/org/apache/pekko/stream/connectors/ftp/impl/LegacyFtpsClient.java @@ -22,10 +22,6 @@ import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStreamWriter; -import java.net.Inet6Address; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.ServerSocket; import java.net.Socket; import java.util.Base64; diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/CommonFtpOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/CommonFtpOperations.scala index 5b7a56bc8..ac1a275e6 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/CommonFtpOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/CommonFtpOperations.scala @@ -86,13 +86,11 @@ private[ftp] trait CommonFtpOperations { if (os != null) os else throw new IOException(s"Could not write to $name") } - def move(fromPath: String, destinationPath: String, handler: Handler): Unit = { + def move(fromPath: String, destinationPath: String, handler: Handler): Unit = if (!handler.rename(fromPath, destinationPath)) throw new IOException(s"Could not move $fromPath") - } - def remove(path: String, handler: Handler): Unit = { + def remove(path: String, handler: Handler): Unit = if (!handler.deleteFile(path)) throw new IOException(s"Could not delete $path") - } def completePendingCommand(handler: Handler): Boolean = handler.completePendingCommand() @@ -101,17 +99,15 @@ private[ftp] trait CommonFtpOperations { val updatedPath = CommonFtpOperations.concatPath(path, name) handler.makeDirectory(updatedPath) - if (handler.getReplyCode != 257) { + if (handler.getReplyCode != 257) throw new IOException(handler.getReplyString) - } } } private[ftp] object CommonFtpOperations { def concatPath(path: 
String, name: String): String = - if (path.endsWith("/")) { + if (path.endsWith("/")) path ++ name - } else { + else s"$path/$name" - } } diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpBrowserGraphStage.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpBrowserGraphStage.scala index 589624aa4..c455bbe1a 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpBrowserGraphStage.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpBrowserGraphStage.scala @@ -27,12 +27,12 @@ private[ftp] trait FtpBrowserGraphStage[FtpClient, S <: RemoteFileSettings] extends FtpGraphStage[FtpClient, S, FtpFile] { val ftpLike: FtpLike[FtpClient, S] - val branchSelector: FtpFile => Boolean = f => true + val branchSelector: FtpFile => Boolean = _ => true def emitTraversedDirectories: Boolean = false - def createLogic(inheritedAttributes: Attributes): FtpGraphStageLogic[FtpFile, FtpClient, S] = { - val logic = new FtpGraphStageLogic[FtpFile, FtpClient, S](shape, ftpLike, connectionSettings, ftpClient) { + def createLogic(inheritedAttributes: Attributes): FtpGraphStageLogic[FtpFile, FtpClient, S] = + new FtpGraphStageLogic[FtpFile, FtpClient, S](shape, ftpLike, connectionSettings, ftpClient) { private[this] var buffer: Seq[FtpFile] = Seq.empty[FtpFile] @@ -89,7 +89,4 @@ private[ftp] trait FtpBrowserGraphStage[FtpClient, S <: RemoteFileSettings] graphStageFtpLike.listFiles(basePath, handler.get) } // end of stage logic - - logic - } } diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpGraphStageLogic.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpGraphStageLogic.scala index b3dd39af8..9779fdff4 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpGraphStageLogic.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpGraphStageLogic.scala @@ -41,9 +41,9 @@ private[ftp] abstract class FtpGraphStageLogic[T, FtpClient, S <: RemoteFileSett super.preStart() try { val tryConnect = graphStageFtpLike.connect(connectionSettings) - if (tryConnect.isSuccess) { + if (tryConnect.isSuccess) handler = tryConnect.toOption - } else + else tryConnect.failed.foreach { case NonFatal(t) => throw t case _ => diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpIOGraphStage.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpIOGraphStage.scala index b26f17426..78d1108c6 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpIOGraphStage.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpIOGraphStage.scala @@ -73,7 +73,8 @@ private[ftp] trait FtpIOSourceStage[FtpClient, S <: RemoteFileSettings] val shape: SourceShape[ByteString] = SourceShape(Outlet[ByteString](s"$name.out")) val out: Outlet[ByteString] = shape.outlets.head.asInstanceOf[Outlet[ByteString]] - def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { + def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (FtpGraphStageLogic[ByteString, FtpClient, S], Future[IOResult]) = { val matValuePromise = Promise[IOResult]() @@ -194,7 +195,8 @@ private[ftp] trait FtpIOSinkStage[FtpClient, S <: RemoteFileSettings] val shape: SinkShape[ByteString] = SinkShape(Inlet[ByteString](s"$name.in")) val in: Inlet[ByteString] = shape.inlets.head.asInstanceOf[Inlet[ByteString]] - def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { + def 
createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (FtpGraphStageLogic[ByteString, FtpClient, S], Future[IOResult]) = { val matValuePromise = Promise[IOResult]() @@ -262,7 +264,7 @@ private[ftp] trait FtpIOSinkStage[FtpClient, S <: RemoteFileSettings] matValuePromise.tryFailure(new IOOperationIncompleteException(writtenBytesTotal, t)) /** BLOCKING I/O WRITE */ - private[this] def write(bytes: ByteString) = + private[this] def write(bytes: ByteString): Unit = osOpt.foreach { os => os.write(bytes.toArray) writtenBytesTotal += bytes.size @@ -289,7 +291,8 @@ private[ftp] trait FtpMoveSink[FtpClient, S <: RemoteFileSettings] def shape: SinkShape[FtpFile] = SinkShape(in) - def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { + def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (FtpGraphStageLogic[FtpFile, FtpClient, S], Future[IOResult]) = { val matValuePromise = Promise[IOResult]() var numberOfMovedFiles = 0 @@ -298,7 +301,7 @@ private[ftp] trait FtpMoveSink[FtpClient, S <: RemoteFileSettings] setHandler( in, new InHandler { - override def onPush(): Unit = { + override def onPush(): Unit = try { val sourcePath = grab(in) graphStageFtpLike.move(sourcePath.path, destinationPath(sourcePath), handler.get) @@ -310,7 +313,6 @@ private[ftp] trait FtpMoveSink[FtpClient, S <: RemoteFileSettings] matFailure(e) failStage(e) } - } override def onUpstreamFailure(exception: Throwable): Unit = { matFailure(exception) @@ -346,7 +348,8 @@ private[ftp] trait FtpRemoveSink[FtpClient, S <: RemoteFileSettings] def shape: SinkShape[FtpFile] = SinkShape(in) - def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { + def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (FtpGraphStageLogic[Unit, FtpClient, S], Future[IOResult]) = { val matValuePromise = Promise[IOResult]() var numberOfRemovedFiles = 0 val logic = new FtpGraphStageLogic[Unit, FtpClient, S](shape, ftpLike, connectionSettings, ftpClient) { @@ -354,7 +357,7 @@ private[ftp] trait FtpRemoveSink[FtpClient, S <: RemoteFileSettings] setHandler( in, new InHandler { - override def onPush(): Unit = { + override def onPush(): Unit = try { graphStageFtpLike.remove(grab(in).path, handler.get) numberOfRemovedFiles = numberOfRemovedFiles + 1 @@ -365,7 +368,6 @@ private[ftp] trait FtpRemoveSink[FtpClient, S <: RemoteFileSettings] matFailure(e) failStage(e) } - } override def onUpstreamFailure(exception: Throwable): Unit = { matFailure(exception) diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala index de0990fd0..5a1bb99c7 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala @@ -28,9 +28,8 @@ private[ftp] trait FtpOperations extends CommonFtpOperations { self: FtpLike[FTP def connect(connectionSettings: FtpSettings)(implicit ftpClient: FTPClient): Try[Handler] = Try { connectionSettings.proxy.foreach(ftpClient.setProxy) - if (ftpClient.getAutodetectUTF8() != connectionSettings.autodetectUTF8) { + if (ftpClient.getAutodetectUTF8 != connectionSettings.autodetectUTF8) ftpClient.setAutodetectUTF8(connectionSettings.autodetectUTF8) - } try { ftpClient.connect(connectionSettings.host, connectionSettings.port) @@ -47,19 +46,16 @@ private[ftp] trait FtpOperations extends CommonFtpOperations { self: FtpLike[FTP ftpClient.login( 
connectionSettings.credentials.username, connectionSettings.credentials.password) - if (ftpClient.getReplyCode == 530) { + if (ftpClient.getReplyCode == 530) throw new FtpAuthenticationException( s"unable to login to host=[${connectionSettings.host}], port=${connectionSettings.port} ${connectionSettings.proxy .fold("")("proxy=" + _.toString)}") - } - if (connectionSettings.binary) { + if (connectionSettings.binary) ftpClient.setFileType(FTP.BINARY_FILE_TYPE) - } - if (connectionSettings.passiveMode) { + if (connectionSettings.passiveMode) ftpClient.enterLocalPassiveMode() - } ftpClient } diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpSourceFactory.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpSourceFactory.scala index dc27c43a5..6b7db20c9 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpSourceFactory.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpSourceFactory.scala @@ -60,7 +60,7 @@ private[ftp] trait FtpSourceFactory[FtpClient, S <: RemoteFileSettings] { self = val connectionSettings: S = _connectionSettings val ftpClient: S => FtpClient = self.ftpClient val ftpLike: FtpLike[FtpClient, S] = _ftpLike - override val branchSelector: (FtpFile) => Boolean = _branchSelector + override val branchSelector: FtpFile => Boolean = _branchSelector override val emitTraversedDirectories: Boolean = _emitTraversedDirectories } @@ -116,7 +116,7 @@ private[ftp] trait FtpSourceFactory[FtpClient, S <: RemoteFileSettings] { self = protected[this] def createMoveSink( _destinationPath: FtpFile => String, - _connectionSettings: S)(implicit _ftpLike: FtpLike[FtpClient, S]) = + _connectionSettings: S)(implicit _ftpLike: FtpLike[FtpClient, S]): FtpMoveSink[FtpClient, S] = new FtpMoveSink[FtpClient, S] { val connectionSettings: S = _connectionSettings val ftpClient: S => FtpClient = self.ftpClient @@ -125,7 +125,7 @@ private[ftp] trait FtpSourceFactory[FtpClient, S <: RemoteFileSettings] { self = } protected[this] def createRemoveSink( - _connectionSettings: S)(implicit _ftpLike: FtpLike[FtpClient, S]) = + _connectionSettings: S)(implicit _ftpLike: FtpLike[FtpClient, S]): FtpRemoveSink[FtpClient, S] = new FtpRemoveSink[FtpClient, S] { val connectionSettings: S = _connectionSettings val ftpClient: S => FtpClient = self.ftpClient @@ -143,10 +143,10 @@ private[ftp] trait FtpSourceFactory[FtpClient, S <: RemoteFileSettings] { self = */ @InternalApi private[ftp] trait FtpSource extends FtpSourceFactory[FTPClient, FtpSettings] { - protected final val FtpBrowserSourceName = "FtpBrowserSource" - protected final val FtpIOSourceName = "FtpIOSource" - protected final val FtpDirectorySource = "FtpDirectorySource" - protected final val FtpIOSinkName = "FtpIOSink" + private final val FtpBrowserSourceName = "FtpBrowserSource" + private final val FtpIOSourceName = "FtpIOSource" + private final val FtpDirectorySource = "FtpDirectorySource" + private final val FtpIOSinkName = "FtpIOSink" protected val ftpClient: FtpSettings => FTPClient = _ => new FTPClient protected val ftpBrowserSourceName: String = FtpBrowserSourceName @@ -160,17 +160,16 @@ private[ftp] trait FtpSource extends FtpSourceFactory[FTPClient, FtpSettings] { */ @InternalApi private[ftp] trait FtpsSource extends FtpSourceFactory[FTPClient, FtpsSettings] { - protected final val FtpsBrowserSourceName = "FtpsBrowserSource" - protected final val FtpsIOSourceName = "FtpsIOSource" - protected final val FtpsDirectorySource = "FtpsDirectorySource" - protected 
final val FtpsIOSinkName = "FtpsIOSink" + private final val FtpsBrowserSourceName = "FtpsBrowserSource" + private final val FtpsIOSourceName = "FtpsIOSource" + private final val FtpsDirectorySource = "FtpsDirectorySource" + private final val FtpsIOSinkName = "FtpsIOSink" protected val ftpClient: FtpsSettings => FTPClient = settings => { - if (settings.useUpdatedFtpsClient) { + if (settings.useUpdatedFtpsClient) new FTPSClient(settings.useFtpsImplicit) - } else { + else new LegacyFtpsClient(settings.useFtpsImplicit) - } } protected val ftpBrowserSourceName: String = FtpsBrowserSourceName protected val ftpIOSourceName: String = FtpsIOSourceName @@ -183,10 +182,10 @@ private[ftp] trait FtpsSource extends FtpSourceFactory[FTPClient, FtpsSettings] */ @InternalApi private[ftp] trait SftpSource extends FtpSourceFactory[SSHClient, SftpSettings] { - protected final val sFtpBrowserSourceName = "sFtpBrowserSource" - protected final val sFtpIOSourceName = "sFtpIOSource" - protected final val sFtpDirectorySource = "sFtpDirectorySource" - protected final val sFtpIOSinkName = "sFtpIOSink" + private final val sFtpBrowserSourceName = "sFtpBrowserSource" + private final val sFtpIOSourceName = "sFtpIOSource" + private final val sFtpDirectorySource = "sFtpDirectorySource" + private final val sFtpIOSinkName = "sFtpIOSink" def sshClient(): SSHClient = new SSHClient() protected val ftpClient: SftpSettings => SSHClient = _ => sshClient() diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala index f457fec62..8fbc08bd5 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala @@ -41,9 +41,8 @@ private[ftp] trait FtpsOperations extends CommonFtpOperations { case _ => } - if (ftpClient.getAutodetectUTF8() != connectionSettings.autodetectUTF8) { + if (ftpClient.getAutodetectUTF8 != connectionSettings.autodetectUTF8) ftpClient.setAutodetectUTF8(connectionSettings.autodetectUTF8) - } ftpClient.connect(connectionSettings.host, connectionSettings.port) @@ -52,19 +51,16 @@ private[ftp] trait FtpsOperations extends CommonFtpOperations { ftpClient.login( connectionSettings.credentials.username, connectionSettings.credentials.password) - if (ftpClient.getReplyCode == 530) { + if (ftpClient.getReplyCode == 530) throw new FtpAuthenticationException( s"unable to login to host=[${connectionSettings.host}], port=${connectionSettings.port} ${connectionSettings.proxy .fold("")("proxy=" + _.toString)}") - } - if (connectionSettings.binary) { + if (connectionSettings.binary) ftpClient.setFileType(FTP.BINARY_FILE_TYPE) - } - if (connectionSettings.passiveMode) { + if (connectionSettings.passiveMode) ftpClient.enterLocalPassiveMode() - } ftpClient } diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/SftpOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/SftpOperations.scala index 04cbd77bd..fb38a158c 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/SftpOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/SftpOperations.scala @@ -47,11 +47,10 @@ private[ftp] trait SftpOperations { self: FtpLike[SSHClient, SftpSettings] => proxy.foreach(p => ssh.setSocketFactory(new DefaultSocketFactory(p))) - if (!strictHostKeyChecking) { + if (!strictHostKeyChecking) 
ssh.addHostKeyVerifier(new PromiscuousVerifier) - } else { + else knownHosts.foreach(path => ssh.loadKnownHosts(new File(path))) - } ssh.connect(host.getHostAddress, port) sftpIdentity match { @@ -66,13 +65,11 @@ private[ftp] trait SftpOperations { self: FtpLike[SSHClient, SftpSettings] => }) ssh.auth(credentials.username, passwordAuth, keyAuth) - } else { + } else ssh.auth(credentials.username, keyAuth) - } case None => - if (credentials.password != "") { + if (credentials.password != "") ssh.authPassword(credentials.username, credentials.password) - } } ssh.newSFTPClient() diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala index 9eb10574a..a7f542598 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala @@ -29,6 +29,8 @@ import pekko.{ Done, NotUsed } import net.schmizz.sshj.SSHClient import org.apache.commons.net.ftp.FTPClient +import scala.annotation.tailrec + @DoNotInherit sealed trait FtpApi[FtpClient, S <: RemoteFileSettings] { self: FtpSourceFactory[FtpClient, S] => @@ -211,7 +213,7 @@ sealed trait FtpApi[FtpClient, S <: RemoteFileSettings] { self: FtpSourceFactory * @param basePath path to start with * @param name name of a directory to create * @param connectionSettings connection settings - * @param materializer materializer + * @param mat materializer * @return [[java.util.concurrent.CompletionStage CompletionStage]] of [[pekko.Done]] indicating a materialized, asynchronous request * @deprecated pass in the actor system instead of the materializer, since Alpakka 3.0.0 */ @@ -275,9 +277,7 @@ sealed trait FtpApi[FtpClient, S <: RemoteFileSettings] { self: FtpSourceFactory def remove(connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] protected[javadsl] def func[T, R](f: T => R): pekko.japi.function.Function[T, R] = - new pekko.japi.function.Function[T, R] { - override def apply(param: T): R = f(param) - } + (param: T) => f(param) } object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { @@ -344,12 +344,12 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { mkdir(basePath, name, connectionSettings).runWith(sink, mat) } + @tailrec def mkdirAsync(basePath: String, name: String, connectionSettings: S, - system: ClassicActorSystemProvider): CompletionStage[Done] = { + system: ClassicActorSystemProvider): CompletionStage[Done] = mkdirAsync(basePath, name, connectionSettings, system.classicSystem) - } def toPath(path: String, connectionSettings: S, append: Boolean): Sink[ByteString, CompletionStage[IOResult]] = { import pekko.util.FutureConverters._ @@ -438,12 +438,12 @@ object Ftps extends FtpApi[FTPClient, FtpsSettings] with FtpsSourceParams { mkdir(basePath, name, connectionSettings).runWith(sink, mat) } + @tailrec def mkdirAsync(basePath: String, name: String, connectionSettings: S, - system: ClassicActorSystemProvider): CompletionStage[Done] = { + system: ClassicActorSystemProvider): CompletionStage[Done] = mkdirAsync(basePath, name, connectionSettings, system.classicSystem) - } def toPath(path: String, connectionSettings: S, append: Boolean): Sink[ByteString, CompletionStage[IOResult]] = { import pekko.util.FutureConverters._ @@ -536,9 +536,8 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { def mkdirAsync(basePath: String, name: String, connectionSettings: 
S, - system: ClassicActorSystemProvider): CompletionStage[Done] = { + system: ClassicActorSystemProvider): CompletionStage[Done] = mkdirAsync(basePath, name, connectionSettings, system.classicSystem) - } def toPath(path: String, connectionSettings: S, append: Boolean): Sink[ByteString, CompletionStage[IOResult]] = { import pekko.util.FutureConverters._ diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala index 90e348cac..e9f749c38 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala @@ -129,7 +129,7 @@ final class FtpSettings private ( configureConnection = configureConnection, proxy = proxy) - override def toString = + override def toString: String = "FtpSettings(" + s"host=$host," + s"port=$port," + @@ -147,7 +147,7 @@ final class FtpSettings private ( object FtpSettings { /** Default FTP port (21) */ - final val DefaultFtpPort = 21 + final val DefaultFtpPort: Int = 21 /** Scala API */ def apply(host: java.net.InetAddress): FtpSettings = new FtpSettings( @@ -250,7 +250,7 @@ final class FtpsSettings private ( keyManager = keyManager, trustManager = trustManager) - override def toString = + override def toString: String = "FtpsSettings(" + s"host=$host," + s"port=$port," + @@ -272,7 +272,7 @@ final class FtpsSettings private ( object FtpsSettings { /** Default FTPs port (2222) */ - final val DefaultFtpsPort = 2222 + final val DefaultFtpsPort: Int = 2222 /** Scala API */ def apply(host: java.net.InetAddress): FtpsSettings = new FtpsSettings( @@ -344,7 +344,7 @@ final class SftpSettings private ( proxy = proxy, maxUnconfirmedReads = maxUnconfirmedReads) - override def toString = + override def toString: String = "SftpSettings(" + s"host=$host," + s"port=$port," + @@ -362,7 +362,7 @@ final class SftpSettings private ( object SftpSettings { /** Default SFTP port (22) */ - final val DefaultSftpPort = 22 + final val DefaultSftpPort: Int = 22 /** Scala API */ def apply(host: java.net.InetAddress): SftpSettings = new SftpSettings( @@ -392,7 +392,7 @@ abstract sealed class FtpCredentials { * FTP credentials factory */ object FtpCredentials { - final val Anonymous = "anonymous" + final val Anonymous: String = "anonymous" val anonymous: FtpCredentials = AnonFtpCredentials @@ -415,7 +415,7 @@ object FtpCredentials { final class NonAnonFtpCredentials @InternalApi private[FtpCredentials] ( val username: String, val password: String) extends FtpCredentials { - override def toString = + override def toString: String = s"FtpCredentials(username=$username,password.nonEmpty=${password.nonEmpty})" } diff --git a/ftp/src/test/java/docs/javadsl/FtpWritingTest.java b/ftp/src/test/java/docs/javadsl/FtpWritingTest.java index 1234fefd3..af89f11c6 100644 --- a/ftp/src/test/java/docs/javadsl/FtpWritingTest.java +++ b/ftp/src/test/java/docs/javadsl/FtpWritingTest.java @@ -66,10 +66,8 @@ FtpSettings ftpSettings() throws Exception { .withPassiveMode(true) // only useful for debugging .withConfigureConnectionConsumer( - (FTPClient ftpClient) -> { - ftpClient.addProtocolCommandListener( - new PrintCommandListener(new PrintWriter(System.out), true)); - }); + (FTPClient ftpClient) -> ftpClient.addProtocolCommandListener( + new PrintCommandListener(new PrintWriter(System.out), true))); // #create-settings return ftpSettings; } diff --git a/ftp/src/test/java/org/apache/pekko/stream/connectors/ftp/CommonFtpStageTest.java 
b/ftp/src/test/java/org/apache/pekko/stream/connectors/ftp/CommonFtpStageTest.java index 331e523be..ea1d48b23 100644 --- a/ftp/src/test/java/org/apache/pekko/stream/connectors/ftp/CommonFtpStageTest.java +++ b/ftp/src/test/java/org/apache/pekko/stream/connectors/ftp/CommonFtpStageTest.java @@ -17,7 +17,6 @@ import org.apache.pekko.actor.ActorSystem; import org.apache.pekko.japi.Pair; import org.apache.pekko.stream.IOResult; -import org.apache.pekko.stream.Materializer; import org.apache.pekko.stream.javadsl.Keep; import org.apache.pekko.stream.javadsl.Sink; import org.apache.pekko.stream.javadsl.Source; diff --git a/ftp/src/test/java/org/apache/pekko/stream/connectors/ftp/FtpsStageTest.java b/ftp/src/test/java/org/apache/pekko/stream/connectors/ftp/FtpsStageTest.java index 0e899d2a1..8ae104986 100644 --- a/ftp/src/test/java/org/apache/pekko/stream/connectors/ftp/FtpsStageTest.java +++ b/ftp/src/test/java/org/apache/pekko/stream/connectors/ftp/FtpsStageTest.java @@ -20,7 +20,6 @@ import org.apache.pekko.stream.javadsl.Sink; import org.apache.pekko.stream.javadsl.Source; import org.apache.pekko.util.ByteString; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; diff --git a/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/BaseSpec.scala b/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/BaseSpec.scala index 966cf066c..7e25749a2 100644 --- a/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/BaseSpec.scala +++ b/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/BaseSpec.scala @@ -69,7 +69,7 @@ trait BaseSpec cleanFiles() } - override protected def afterAll() = { + override protected def afterAll(): Unit = { TestKit.shutdownActorSystem(getSystem, verifySystemShutdown = true) super.afterAll() } diff --git a/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala b/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala index 2fd6852b3..b17043ad2 100644 --- a/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala +++ b/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala @@ -24,15 +24,14 @@ private[pekko] trait ObjectDecoder { implicit witness: Witness.Aux[K], hDecoder: Lazy[PdxDecoder[H]], tDecoder: Lazy[PdxDecoder[T]]): PdxDecoder[FieldType[K, H] :: T] = PdxDecoder.instance { - case (reader, fieldName) => { + case (reader, fieldName) => val headField = hDecoder.value.decode(reader, witness.value) val tailFields = tDecoder.value.decode(reader, fieldName) (headField, tailFields) match { case (Success(h), Success(t)) => Success(field[K](h) :: t) case _ => Failure(null) } - } - case e => Failure(null) + case _ => Failure(null) } implicit def objectDecoder[A, Repr <: HList]( diff --git a/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/LabelledGenericGeneric.scala b/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/LabelledGenericGeneric.scala index 835aac0e1..58ea8da3e 100644 --- a/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/LabelledGenericGeneric.scala +++ b/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/LabelledGenericGeneric.scala @@ -50,7 +50,6 @@ private[pekko] trait LabelledGeneric[A] { @InternalApi private[pekko] object LabelledGeneric { type Aux[A, R] = LabelledGeneric[A] { type Repr = R } - inline def apply[A](using l: LabelledGeneric[A]): LabelledGeneric.Aux[A, l.Repr] = l inline given 
productInst[A <: Product]( diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/GeodeCache.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/GeodeCache.scala index fc8253120..45b3a8576 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/GeodeCache.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/GeodeCache.scala @@ -46,7 +46,7 @@ private[geode] abstract class GeodeCache(geodeSettings: GeodeSettings) { *
 * customized by client application
  • * */ - final protected def newCacheFactory(): ClientCacheFactory = { + private final def newCacheFactory(): ClientCacheFactory = { val factory = configure(new ClientCacheFactory().setPdxSerializer(serializer)) geodeSettings.configure.map(_(factory)).getOrElse(factory) } diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxDecoder.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxDecoder.scala index f3fb18d21..9b6b12d86 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxDecoder.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxDecoder.scala @@ -30,9 +30,7 @@ trait PdxDecoder[A] { object PdxDecoder extends ObjectDecoder { private[pekko] def instance[A](f: (PdxReader, Symbol) => Try[A]): PdxDecoder[A] = - new PdxDecoder[A] { - def decode(reader: PdxReader, fieldName: Symbol) = f(reader, fieldName) - } + (reader: PdxReader, fieldName: Symbol) => f(reader, fieldName) implicit val booleanDecoder: PdxDecoder[Boolean] = instance { case (reader, fieldName) => diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeCQueryGraphLogic.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeCQueryGraphLogic.scala index 96fa7558e..f92eaebe5 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeCQueryGraphLogic.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeCQueryGraphLogic.scala @@ -46,13 +46,13 @@ private[geode] abstract class GeodeCQueryGraphLogic[V](val shape: SourceShape[V] private var query: CqQuery = _ - override def executeQuery() = Try { + override def executeQuery(): Try[util.Iterator[V]] = Try { val cqf = new CqAttributesFactory() val eventListener = new CqListenerAdapter() { override def onEvent(ev: CqEvent): Unit = - onGeodeElement(ev.getNewValue().asInstanceOf[V]) + onGeodeElement(ev.getNewValue.asInstanceOf[V]) override def onError(ev: CqEvent): Unit = log.error(ev.getThrowable, s"$ev") @@ -68,11 +68,11 @@ private[geode] abstract class GeodeCQueryGraphLogic[V](val shape: SourceShape[V] query = qs.newCq(queryName, sql, cqa) - buildInitialResulsIterator(query) + buildInitialResultsIterator(query) } - private def buildInitialResulsIterator(q: CqQuery) = { + private def buildInitialResultsIterator(q: CqQuery) = { val res = q.executeWithInitialResults[Struct] val it = res.iterator() new util.Iterator[V] { @@ -86,12 +86,12 @@ private[geode] abstract class GeodeCQueryGraphLogic[V](val shape: SourceShape[V] /** * May lock on semaphore.acquires(). */ - protected def onGeodeElement(v: V): Unit = { + private def onGeodeElement(v: V): Unit = { semaphore.acquire() onElement.invoke(v) } - protected def incomingQueueIsEmpty = incomingQueue.isEmpty + protected def incomingQueueIsEmpty: Boolean = incomingQueue.isEmpty protected def enqueue(v: V): Unit = incomingQueue.enqueue(v) @@ -105,7 +105,7 @@ private[geode] abstract class GeodeCQueryGraphLogic[V](val shape: SourceShape[V] /** * Pushes an element downstream and releases a semaphore acquired in onGeodeElement. 
*/ - protected def pushElement(out: Outlet[V], element: V) = { + protected def pushElement(out: Outlet[V], element: V): Unit = { push(out, element) semaphore.release() } @@ -122,12 +122,12 @@ private[geode] abstract class GeodeCQueryGraphLogic[V](val shape: SourceShape[V] @volatile private var upstreamTerminated = false - val inFinish: AsyncCallback[Unit] = getAsyncCallback[Unit] { v => + private val inFinish: AsyncCallback[Unit] = getAsyncCallback[Unit] { _ => upstreamTerminated = true - handleTerminaison() + handleTermination() } - def handleTerminaison() = + def handleTermination(): Unit = if (upstreamTerminated && incomingQueue.isEmpty) completeStage() diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeContinuousSourceStage.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeContinuousSourceStage.scala index e34449ca4..0f08bfd3a 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeContinuousSourceStage.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeContinuousSourceStage.scala @@ -29,7 +29,7 @@ private[geode] class GeodeContinuousSourceStage[V](cache: ClientCache, name: Str override protected def initialAttributes: Attributes = super.initialAttributes and Attributes.name("GeodeContinuousSource") and ActorAttributes.IODispatcher - val out = Outlet[V](s"geode.continuousSource") + val out: Outlet[V] = Outlet[V](s"geode.continuousSource") override def shape: SourceShape[V] = SourceShape.of(out) @@ -38,16 +38,16 @@ private[geode] class GeodeContinuousSourceStage[V](cache: ClientCache, name: Str (new GeodeCQueryGraphLogic[V](shape, cache, name, sql) { - override val onConnect: AsyncCallback[Unit] = getAsyncCallback[Unit] { v => + override val onConnect: AsyncCallback[Unit] = getAsyncCallback[Unit] { _ => subPromise.success(Done) } val onElement: AsyncCallback[V] = getAsyncCallback[V] { element => - if (isAvailable(out) && incomingQueueIsEmpty) { + if (isAvailable(out) && incomingQueueIsEmpty) pushElement(out, element) - } else + else enqueue(element) - handleTerminaison() + handleTermination() } // @@ -56,14 +56,14 @@ private[geode] class GeodeContinuousSourceStage[V](cache: ClientCache, name: Str setHandler( out, new OutHandler { - override def onPull() = { + override def onPull(): Unit = { if (initialResultsIterator.hasNext) push(out, initialResultsIterator.next()) else dequeue().foreach { e => pushElement(out, e) } - handleTerminaison() + handleTermination() } }) diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeFiniteSourceStage.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeFiniteSourceStage.scala index 3a605e602..b64f6be5d 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeFiniteSourceStage.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeFiniteSourceStage.scala @@ -38,14 +38,14 @@ private[geode] class GeodeFiniteSourceStage[V](cache: ClientCache, sql: String) (new GeodeQueryGraphLogic[V](shape, cache, sql) { - override val onConnect: AsyncCallback[Unit] = getAsyncCallback[Unit] { v => + override val onConnect: AsyncCallback[Unit] = getAsyncCallback[Unit] { _ => subPromise.success(Done) } setHandler( out, new OutHandler { - override def onPull() = + override def onPull(): Unit = if (initialResultsIterator.hasNext) push(out, initialResultsIterator.next()) else diff --git 
a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeFlowStage.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeFlowStage.scala index dea96357b..bb299bce8 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeFlowStage.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeFlowStage.scala @@ -31,26 +31,26 @@ private[geode] class GeodeFlowStage[K, T <: AnyRef](cache: ClientCache, settings private val in = Inlet[T]("geode.in") private val out = Outlet[T]("geode.out") - override val shape = FlowShape(in, out) + override val shape: FlowShape[T, T] = FlowShape(in, out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with StageLogging with GeodeCapabilities[K, T] { - override protected def logSource = classOf[GeodeFlowStage[K, T]] + override protected def logSource: Class[GeodeFlowStage[K, T]] = classOf[GeodeFlowStage[K, T]] - val regionSettings = settings + val regionSettings: RegionSettings[K, T] = settings - val clientCache = cache + val clientCache: ClientCache = cache setHandler(out, new OutHandler { - override def onPull() = + override def onPull(): Unit = pull(in) }) setHandler(in, new InHandler { - override def onPush() = { + override def onPush(): Unit = { val msg = grab(in) put(msg) @@ -60,7 +60,7 @@ private[geode] class GeodeFlowStage[K, T <: AnyRef](cache: ClientCache, settings }) - override def postStop() = { + override def postStop(): Unit = { log.debug("Stage completed") close() } diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeQueryGraphLogic.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeQueryGraphLogic.scala index ca7532e99..6f2ec2be3 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeQueryGraphLogic.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeQueryGraphLogic.scala @@ -20,6 +20,7 @@ import pekko.stream.stage.StageLogging import org.apache.geode.cache.client.ClientCache import org.apache.geode.cache.query.SelectResults +import java.util import scala.util.Try @InternalApi @@ -29,7 +30,7 @@ private[geode] abstract class GeodeQueryGraphLogic[V](val shape: SourceShape[V], extends GeodeSourceStageLogic[V](shape, clientCache) with StageLogging { - override def executeQuery() = Try { + override def executeQuery(): Try[util.Iterator[V]] = Try { qs.newQuery(query) .execute() .asInstanceOf[SelectResults[V]] diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeSourceStageLogic.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeSourceStageLogic.scala index 00cc25364..ff8e7f0ac 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeSourceStageLogic.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/stage/GeodeSourceStageLogic.scala @@ -18,6 +18,7 @@ import pekko.annotation.InternalApi import pekko.stream.SourceShape import pekko.stream.stage.{ AsyncCallback, GraphStageLogic } import org.apache.geode.cache.client.ClientCache +import org.apache.geode.cache.query.QueryService import scala.util.{ Failure, Success, Try } @@ -29,7 +30,7 @@ private[geode] abstract class GeodeSourceStageLogic[V](shape: SourceShape[V], cl val onConnect: AsyncCallback[Unit] - lazy val qs = clientCache.getQueryService() + lazy val 
qs: QueryService = clientCache.getQueryService() def executeQuery(): Try[java.util.Iterator[V]] diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/scaladsl/Geode.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/scaladsl/Geode.scala index 3292a393b..f23358cf2 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/scaladsl/Geode.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/scaladsl/Geode.scala @@ -58,9 +58,7 @@ class Geode(settings: GeodeSettings) extends GeodeCache(settings) { */ def query[V <: AnyRef]( query: String)(implicit tag: ClassTag[V], enc: PdxEncoder[V], dec: PdxDecoder[V]): Source[V, Future[Done]] = { - registerPDXSerializer(new ShapelessPdxSerializer[V](enc, dec), tag.runtimeClass) - Source.fromGraph(new GeodeFiniteSourceStage[V](cache, query)) } @@ -91,15 +89,13 @@ trait PoolSubscription extends Geode { /** * Pool subscription is mandatory for continuous query. */ - final override protected def configure(factory: ClientCacheFactory) = + final override protected def configure(factory: ClientCacheFactory): ClientCacheFactory = super.configure(factory).setPoolSubscriptionEnabled(true) def continuousQuery[V <: AnyRef](queryName: Symbol, query: String, serializer: PekkoPdxSerializer[V]): Source[V, Future[Done]] = { - registerPDXSerializer(serializer, serializer.clazz) - Source.fromGraph(new GeodeContinuousSourceStage[V](cache, queryName.name, query)) } @@ -115,7 +111,7 @@ trait PoolSubscription extends Geode { Source.fromGraph(new GeodeContinuousSourceStage[V](cache, queryName.name, query)) } - def closeContinuousQuery(queryName: Symbol) = + def closeContinuousQuery(queryName: Symbol): Option[Unit] = for { qs <- Option(cache.getQueryService()) query <- Option(qs.getCq(queryName.name)) diff --git a/geode/src/test/java/docs/javadsl/GeodeFiniteSourceTestCase.java b/geode/src/test/java/docs/javadsl/GeodeFiniteSourceTestCase.java index ea37c2fc1..6ae8411bf 100644 --- a/geode/src/test/java/docs/javadsl/GeodeFiniteSourceTestCase.java +++ b/geode/src/test/java/docs/javadsl/GeodeFiniteSourceTestCase.java @@ -35,9 +35,7 @@ public void finiteSourceTest() throws ExecutionException, InterruptedException { geode .query("select * from /persons", new PersonPdxSerializer()) .runForeach( - p -> { - LOGGER.debug(p.toString()); - }, + p -> LOGGER.debug(p.toString()), system); // #query @@ -47,9 +45,7 @@ public void finiteSourceTest() throws ExecutionException, InterruptedException { geode .query("select * from /animals", new AnimalPdxSerializer()) .runForeach( - p -> { - LOGGER.debug(p.toString()); - }, + p -> LOGGER.debug(p.toString()), system); animalsDone.toCompletableFuture().get(); diff --git a/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala b/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala index 46dca6113..bebaccca3 100644 --- a/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala +++ b/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala @@ -44,7 +44,7 @@ class GeodeBaseSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll wit * @param f */ def it(f: GeodeSettings => Unit): Unit = - f(GeodeSettings(sys.env.get("IT_GEODE_HOSTNAME").getOrElse("localhost"))) + f(GeodeSettings(sys.env.getOrElse("IT_GEODE_HOSTNAME", "localhost"))) protected def buildPersonsSource(range: Range): Source[Person, Any] = Source(range).map(i => Person(i, s"Person Scala $i", new Date())) @@ -53,7 +53,7 @@ class GeodeBaseSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll wit Source(range).map(i 
=> Animal(i, s"Animal Scala $i", 1)) protected def buildComplexesSource(range: Range): Source[Complex, Any] = - Source(range).map(i => Complex(UUID.randomUUID(), List(1, 2, 3), List(new Date()), Set(UUID.randomUUID()))) + Source(range).map(_ => Complex(UUID.randomUUID(), List(1, 2, 3), List(new Date()), Set(UUID.randomUUID()))) override protected def afterAll(): Unit = Await.result(system.terminate(), 10 seconds) diff --git a/geode/src/test/scala/docs/scaladsl/GeodeContinuousSourceSpec.scala b/geode/src/test/scala/docs/scaladsl/GeodeContinuousSourceSpec.scala index fb90e2bcd..5855f62f7 100644 --- a/geode/src/test/scala/docs/scaladsl/GeodeContinuousSourceSpec.scala +++ b/geode/src/test/scala/docs/scaladsl/GeodeContinuousSourceSpec.scala @@ -44,12 +44,10 @@ class GeodeContinuousSourceSpec extends GeodeBaseSpec { .continuousQuery[Person](Symbol("test"), s"select * from /persons") .runWith(Sink.fold(0) { (c, p) => log.debug(s"$p $c") - if (c == 19) { + if (c == 19) geode.closeContinuousQuery(Symbol("test")).foreach { _ => log.debug("test cQuery is closed") } - - } c + 1 }) // #continuousQuery diff --git a/geode/src/test/scala/docs/scaladsl/Model.scala b/geode/src/test/scala/docs/scaladsl/Model.scala index b069404af..a8650f899 100644 --- a/geode/src/test/scala/docs/scaladsl/Model.scala +++ b/geode/src/test/scala/docs/scaladsl/Model.scala @@ -15,7 +15,7 @@ package docs.scaladsl import java.util.{ Date, UUID } -case class Person(id: Int, name: String, birthDate: Date) -case class Animal(id: Int, name: String, owner: Int) +final case class Person(id: Int, name: String, birthDate: Date) +final case class Animal(id: Int, name: String, owner: Int) -case class Complex(id: UUID, ints: List[Int], dates: List[Date], ids: Set[UUID] = Set()) +final case class Complex(id: UUID, ints: List[Int], dates: List[Date], ids: Set[UUID] = Set()) diff --git a/geode/src/test/scala/docs/scaladsl/PersonPdxSerializer.scala b/geode/src/test/scala/docs/scaladsl/PersonPdxSerializer.scala index 3db69b969..518f1e747 100644 --- a/geode/src/test/scala/docs/scaladsl/PersonPdxSerializer.scala +++ b/geode/src/test/scala/docs/scaladsl/PersonPdxSerializer.scala @@ -23,14 +23,14 @@ object PersonPdxSerializer extends PekkoPdxSerializer[Person] { override def clazz: Class[Person] = classOf[Person] override def toData(o: scala.Any, out: PdxWriter): Boolean = - if (o.isInstanceOf[Person]) { - val p = o.asInstanceOf[Person] - out.writeInt("id", p.id) - out.writeString("name", p.name) - out.writeDate("birthDate", p.birthDate) - true - } else - false + o match { + case p: Person => + out.writeInt("id", p.id) + out.writeString("name", p.name) + out.writeDate("birthDate", p.birthDate) + true + case _ => false + } override def fromData(clazz: Class[_], in: PdxReader): AnyRef = { val id: Int = in.readInt("id") diff --git a/google-cloud-bigquery-storage/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes b/google-cloud-bigquery-storage/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes new file mode 100644 index 000000000..63d2ec397 --- /dev/null +++ b/google-cloud-bigquery-storage/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.storage.scaladsl.GrpcBigQueryStorageReaderExt.lookup") 
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.storage.scaladsl.GrpcBigQueryStorageReaderExt.lookup") diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/BigQueryRecordMapImpl.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/BigQueryRecordMapImpl.scala index 894295d45..696e0f7a1 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/BigQueryRecordMapImpl.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/BigQueryRecordMapImpl.scala @@ -23,13 +23,13 @@ trait BigQueryRecord { object BigQueryRecord { - def fromMap(map: Map[String, Object]): BigQueryRecord = new BigQueryRecordMapImpl(map) + def fromMap(map: Map[String, Object]): BigQueryRecord = BigQueryRecordMapImpl(map) - def fromAvro(record: GenericRecord): BigQueryRecord = new BigQueryRecordAvroImpl(record) + def fromAvro(record: GenericRecord): BigQueryRecord = BigQueryRecordAvroImpl(record) } -case class BigQueryRecordAvroImpl(record: GenericRecord) extends BigQueryRecord { +final case class BigQueryRecordAvroImpl(record: GenericRecord) extends BigQueryRecord { override def get(column: String): Option[Object] = Option(record.get(column)) @@ -42,7 +42,7 @@ case class BigQueryRecordAvroImpl(record: GenericRecord) extends BigQueryRecord } -case class BigQueryRecordMapImpl(map: Map[String, Object]) extends BigQueryRecord { +final case class BigQueryRecordMapImpl(map: Map[String, Object]) extends BigQueryRecord { override def get(column: String): Option[Object] = map.get(column) diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/ProtobufConverters.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/ProtobufConverters.scala index 0025a4959..38009c20f 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/ProtobufConverters.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/ProtobufConverters.scala @@ -33,9 +33,8 @@ import scalapb.UnknownFieldSet unknownFields = unknownFields()) } - private final def selectedFields(): Seq[String] = { + private final def selectedFields(): Seq[String] = readOption.getSelectedFieldsList.asScala.map(s => s.asInstanceOf[String]).toSeq - } private final def unknownFields(): scalapb.UnknownFieldSet = { val map = readOption.getUnknownFields @@ -46,13 +45,12 @@ import scalapb.UnknownFieldSet scalapb.UnknownFieldSet(map) } - private final def unknownField(field: com.google.protobuf.UnknownFieldSet.Field): UnknownFieldSet.Field = { + private final def unknownField(field: com.google.protobuf.UnknownFieldSet.Field): UnknownFieldSet.Field = UnknownFieldSet.Field( varint = field.getVarintList.asScala.map(_.asInstanceOf[Long]).toSeq, fixed64 = field.getFixed64List.asScala.map(_.asInstanceOf[Long]).toSeq, fixed32 = field.getFixed32List.asScala.map(_.asInstanceOf[Int]).toSeq, lengthDelimited = field.getLengthDelimitedList.asScala.toSeq) - } } diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/ArrowSource.scala 
b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/ArrowSource.scala index 6069d53f0..cbf19110c 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/ArrowSource.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/ArrowSource.scala @@ -22,9 +22,10 @@ import com.google.cloud.bigquery.storage.v1.arrow.{ ArrowRecordBatch, ArrowSchem import com.google.cloud.bigquery.storage.v1.storage.BigQueryReadClient import com.google.cloud.bigquery.storage.v1.stream.ReadSession import org.apache.arrow.memory.RootAllocator -import org.apache.arrow.vector.{ VectorLoader, VectorSchemaRoot } +import org.apache.arrow.vector.{ FieldVector, VectorLoader, VectorSchemaRoot } import org.apache.arrow.vector.ipc.ReadChannel import org.apache.arrow.vector.ipc.message.MessageSerializer +import org.apache.arrow.vector.types.pojo.Schema import org.apache.arrow.vector.util.ByteArrayReadableSeekableByteChannel import scala.collection.mutable @@ -60,12 +61,12 @@ final class SimpleRowReader(val schema: ArrowSchema) extends AutoCloseable { val allocator = new RootAllocator(Long.MaxValue) - val sd = MessageSerializer.deserializeSchema( + val sd: Schema = MessageSerializer.deserializeSchema( new ReadChannel( new ByteArrayReadableSeekableByteChannel( schema.serializedSchema.toByteArray))) - val vec = sd.getFields.asScala.map(_.createVector(allocator)) + val vec: mutable.Buffer[FieldVector] = sd.getFields.asScala.map(_.createVector(allocator)) var root = new VectorSchemaRoot(vec.asJava) val loader = new VectorLoader(root) @@ -73,7 +74,7 @@ final class SimpleRowReader(val schema: ArrowSchema) extends AutoCloseable { val deserializedBatch = MessageSerializer.deserializeRecordBatch(new ReadChannel( new ByteArrayReadableSeekableByteChannel( batch.serializedRecordBatch.toByteArray)), - allocator); + allocator) loader.load(deserializedBatch) deserializedBatch.close() @@ -89,13 +90,13 @@ final class SimpleRowReader(val schema: ArrowSchema) extends AutoCloseable { recordsList += BigQueryRecord.fromMap(map.toMap) } - root.clear(); + root.clear() recordsList.toList } override def close(): Unit = { - root.close(); - allocator.close(); + root.close() + allocator.close() } } diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/AvroSource.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/AvroSource.scala index 47398cd47..64e0e2058 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/AvroSource.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/AvroSource.scala @@ -23,11 +23,10 @@ import com.google.cloud.bigquery.storage.v1.stream.ReadSession object AvroSource { - def readRecordsMerged(client: BigQueryReadClient, readSession: ReadSession): Source[List[BigQueryRecord], NotUsed] = { + def readRecordsMerged(client: BigQueryReadClient, readSession: ReadSession): Source[List[BigQueryRecord], NotUsed] = readMerged(client, readSession) .map(a => AvroDecoder(readSession.schema.avroSchema.get.schema).decodeRows(a.serializedBinaryRows)) .map(_.map(BigQueryRecord.fromAvro)) - } def readMerged(client: BigQueryReadClient, session: ReadSession): Source[AvroRows, 
NotUsed] = read(client, session).reduce((a, b) => a.merge(b)) diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/AkkaGrpcSettings.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/PekkoGrpcSettings.scala similarity index 97% rename from google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/AkkaGrpcSettings.scala rename to google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/PekkoGrpcSettings.scala index 5b0f3d9dd..9d83ff6d2 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/AkkaGrpcSettings.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/PekkoGrpcSettings.scala @@ -62,7 +62,7 @@ import java.util.concurrent.Executor .fold(settings.withTls(false))(_ => settings.withTls(true)) val setCallCredentials = (settings: GrpcClientSettings) => { - implicit val config = system.classicSystem.settings.config + implicit val config: Config = system.classicSystem.settings.config val executor: Executor = system.classicSystem.dispatcher settings.withCallCredentials(MoreCallCredentials.from(credentials().asGoogle(executor, requestSettings()))) } diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/SDKClientSource.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/SDKClientSource.scala index 21b5d0f6d..cfa73c460 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/SDKClientSource.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/impl/SDKClientSource.scala @@ -23,7 +23,7 @@ object SDKClientSource { private val RequestParamsHeader = "x-goog-request-params" - def read(client: BigQueryReadClient, readSession: ReadSession): Seq[Source[ReadRowsResponse.Rows, NotUsed]] = { + def read(client: BigQueryReadClient, readSession: ReadSession): Seq[Source[ReadRowsResponse.Rows, NotUsed]] = readSession.streams .map(stream => { client @@ -32,6 +32,5 @@ object SDKClientSource { .invoke(ReadRowsRequest(stream.name)) .map(_.rows) }) - } } diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/BigQueryStorage.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/BigQueryStorage.scala index 1292c9387..0c5dfd1b2 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/BigQueryStorage.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/BigQueryStorage.scala @@ -59,7 +59,7 @@ object BigQueryStorage { tableId: String, dataFormat: DataFormat, readOptions: Option[TableReadOptions] = None, - maxNumStreams: Int = 0)(implicit um: FromByteStringUnmarshaller[A]): Source[A, Future[NotUsed]] = { + maxNumStreams: Int = 0)(implicit um: FromByteStringUnmarshaller[A]): Source[A, Future[NotUsed]] = Source.fromMaterializer { (mat, attr) => { implicit val 
materializer: Materializer = mat @@ -84,7 +84,6 @@ object BigQueryStorage { .flatMapConcat(a => a.get) } } - } private[scaladsl] def readSession(client: BigQueryReadClient, projectId: String, diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/GrpcBigQueryStorageReader.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/GrpcBigQueryStorageReader.scala index 19370175f..b309bc35c 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/GrpcBigQueryStorageReader.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/GrpcBigQueryStorageReader.scala @@ -48,7 +48,7 @@ final class GrpcBigQueryStorageReaderExt private (sys: ExtendedActorSystem) exte } object GrpcBigQueryStorageReaderExt extends ExtensionId[GrpcBigQueryStorageReaderExt] with ExtensionIdProvider { - override def lookup = GrpcBigQueryStorageReaderExt + override def lookup: GrpcBigQueryStorageReaderExt.type = GrpcBigQueryStorageReaderExt override def createExtension(system: ExtendedActorSystem) = new GrpcBigQueryStorageReaderExt(system) /** diff --git a/google-cloud-bigquery-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/BigQueryStorageSpecBase.scala b/google-cloud-bigquery-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/BigQueryStorageSpecBase.scala index a31d25586..0204cdfb8 100644 --- a/google-cloud-bigquery-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/BigQueryStorageSpecBase.scala +++ b/google-cloud-bigquery-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/BigQueryStorageSpecBase.scala @@ -38,24 +38,20 @@ abstract class BigQueryStorageSpecBase(_port: Int) extends BigQueryMockData with private val binding: Promise[Http.ServerBinding] = Promise[Http.ServerBinding]() - def storageAvroSchema = { + def storageAvroSchema = AvroSchema(com.google.cloud.bigquery.storage.v1.avro.AvroSchema.of(FullAvroSchema.toString)) - } - def storageArrowSchema = { + def storageArrowSchema = ArrowSchema( com.google.cloud.bigquery.storage.v1.arrow.ArrowSchema.of(ByteString.copyFromUtf8(FullArrowSchema.toJson))) - } - def storageAvroRows = { + def storageAvroRows = AvroRows(recordsAsRows(FullAvroRecord)) - } def startMock(): Promise[Http.ServerBinding] = { val bindingRes = new BigQueryMockServer(bqPort).run().futureValue binding.success(bindingRes) } - def stopMock(): Done = { + def stopMock(): Done = binding.future.futureValue.unbind().futureValue - } } diff --git a/google-cloud-bigquery-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/ArrowByteStringDecoder.scala b/google-cloud-bigquery-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/ArrowByteStringDecoder.scala index 6fd390e4f..ec7637880 100644 --- a/google-cloud-bigquery-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/ArrowByteStringDecoder.scala +++ b/google-cloud-bigquery-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/ArrowByteStringDecoder.scala @@ -48,7 +48,7 @@ class ArrowByteStringDecoder(val schema: ArrowSchema) extends FromByteStringUnma val 
deserializedBatch = MessageSerializer.deserializeRecordBatch(new ReadChannel( new ByteArrayReadableSeekableByteChannel( batch.toByteBuffer.array())), - allocator); + allocator) loader.load(deserializedBatch) deserializedBatch.close() @@ -64,10 +64,10 @@ class ArrowByteStringDecoder(val schema: ArrowSchema) extends FromByteStringUnma recordsList += BigQueryRecord.fromMap(map.toMap) } - root.clear(); + root.clear() - root.close(); - allocator.close(); + root.close() + allocator.close() Future(recordsList.toList) } diff --git a/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/BigQueryBasicFormats-more-specific-type.backwards.excludes b/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/BigQueryBasicFormats-more-specific-type.backwards.excludes new file mode 100644 index 000000000..09290ae43 --- /dev/null +++ b/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/BigQueryBasicFormats-more-specific-type.backwards.excludes @@ -0,0 +1,12 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#BigDecimalJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#BigIntJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#BooleanJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#ByteJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#ByteStringJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#CharJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#IntJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#ShortJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#StringJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#SymbolJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryBasicFormats#UnitJsonFormat.write") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRestBasicFormats#BigQueryLongJsonFormat.write") diff --git a/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes b/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes new file mode 100644 index 000000000..63d2ec397 --- /dev/null +++ 
b/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.storage.scaladsl.GrpcBigQueryStorageReaderExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.storage.scaladsl.GrpcBigQueryStorageReaderExt.lookup") diff --git a/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcPublisherExt-more-specific-type.backwards.excludes b/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcPublisherExt-more-specific-type.backwards.excludes new file mode 100644 index 000000000..aaf87c300 --- /dev/null +++ b/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcPublisherExt-more-specific-type.backwards.excludes @@ -0,0 +1,4 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.javadsl.GrpcPublisherExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.javadsl.GrpcPublisherExt.lookup") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.scaladsl.GrpcPublisherExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.scaladsl.GrpcPublisherExt.lookup") diff --git a/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcSubscriberExt-more-specific-type.backwards.excludes b/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcSubscriberExt-more-specific-type.backwards.excludes new file mode 100644 index 000000000..d530646f2 --- /dev/null +++ b/google-cloud-bigquery/src/main/mima-filters/1.1.x.backwards.excludes/GrpcSubscriberExt-more-specific-type.backwards.excludes @@ -0,0 +1,4 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.javadsl.GrpcSubscriberExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.javadsl.GrpcSubscriberExt.lookup") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.scaladsl.GrpcSubscriberExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.scaladsl.GrpcSubscriberExt.lookup") diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala index d0ea423d3..e63992908 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala @@ -28,8 +28,8 @@ import scala.annotation.nowarn final case class BigQueryException private (override val info: ErrorInfo, raw: String) extends ExceptionWithErrorInfo(info) { - def getInfo = info - def getRaw = raw + def getInfo: ErrorInfo = info + def getRaw: String = raw } object BigQueryException { diff --git 
a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryExt.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryExt.scala index 61ab2ce1e..6616cec1c 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryExt.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryExt.scala @@ -47,7 +47,7 @@ private[bigquery] object BigQueryExt extends ExtensionId[BigQueryExt] with Exten def apply()(implicit system: ActorSystem): BigQueryExt = super.apply(system) - override def lookup = BigQueryExt + override def lookup: BigQueryExt.type = BigQueryExt override def createExtension(system: ExtendedActorSystem) = new BigQueryExt(system) /** diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQuerySettings.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQuerySettings.scala index d55100096..842550fbf 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQuerySettings.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQuerySettings.scala @@ -34,7 +34,7 @@ object BigQuerySettings { /** * Java API: Reads from the given config. */ - def create(c: Config) = apply(c) + def create(c: Config): BigQuerySettings = apply(c) /** * Scala API: Creates [[BigQuerySettings]] from the [[com.typesafe.config.Config Config]] attached to an actor system. @@ -56,14 +56,14 @@ object BigQuerySettings { /** * Java API */ - def create(loadJobPerTableQuota: time.Duration) = BigQuerySettings(loadJobPerTableQuota.asScala) + def create(loadJobPerTableQuota: time.Duration): BigQuerySettings = BigQuerySettings(loadJobPerTableQuota.asScala) } final case class BigQuerySettings @InternalApi private (loadJobPerTableQuota: FiniteDuration) { - def getLoadJobPerTableQuota = loadJobPerTableQuota.asJava - def withLoadJobPerTableQuota(loadJobPerTableQuota: FiniteDuration) = + def getLoadJobPerTableQuota: time.Duration = loadJobPerTableQuota.asJava + def withLoadJobPerTableQuota(loadJobPerTableQuota: FiniteDuration): BigQuerySettings = copy(loadJobPerTableQuota = loadJobPerTableQuota) - def withLoadJobPerTableQuota(loadJobPerTableQuota: time.Duration) = + def withLoadJobPerTableQuota(loadJobPerTableQuota: time.Duration): BigQuerySettings = copy(loadJobPerTableQuota = loadJobPerTableQuota.asScala) } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/InsertAllRetryPolicy.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/InsertAllRetryPolicy.scala index 113fb76d9..99085b108 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/InsertAllRetryPolicy.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/InsertAllRetryPolicy.scala @@ -31,7 +31,7 @@ object InsertAllRetryPolicy { /** * Java API: Never retry failed insert requests */ - def never = Never + def never: Never.type = Never /** * Retry failed insert requests without deduplication @@ -44,7 +44,7 @@ object InsertAllRetryPolicy { /** * Java API: Retry failed insert requests without deduplication */ - def withoutDeduplication = WithDeduplication + def 
withoutDeduplication: WithDeduplication.type = WithDeduplication /** * Retry failed insert requests with best-effort deduplication @@ -59,5 +59,5 @@ object InsertAllRetryPolicy { * Java API: Retry failed insert requests with best-effort deduplication * @see [[https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataconsistency BigQuery reference]] */ - def withDeduplication = WithDeduplication + def withDeduplication: WithDeduplication.type = WithDeduplication } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala index ef9ff12ca..e18c874a4 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala @@ -21,6 +21,7 @@ import pekko.util.OptionConverters._ import spray.json.{ JsonFormat, RootJsonFormat } import java.util +import java.util.Optional import scala.collection.immutable.Seq /** @@ -37,25 +38,25 @@ final case class Dataset private[bigquery] (datasetReference: DatasetReference, labels: Option[Map[String, String]], location: Option[String]) { - def getDatasetReference = datasetReference - def getFriendlyName = friendlyName.toJava - def getLabels = labels.map(_.asJava).toJava - def getLocation = location.toJava + def getDatasetReference: DatasetReference = datasetReference + def getFriendlyName: Optional[String] = friendlyName.toJava + def getLabels: Optional[util.Map[String, String]] = labels.map(_.asJava).toJava + def getLocation: Optional[String] = location.toJava - def withDatasetReference(datasetReference: DatasetReference) = + def withDatasetReference(datasetReference: DatasetReference): Dataset = copy(datasetReference = datasetReference) - def withFriendlyName(friendlyName: Option[String]) = + def withFriendlyName(friendlyName: Option[String]): Dataset = copy(friendlyName = friendlyName) - def withFriendlyName(friendlyName: util.Optional[String]) = + def withFriendlyName(friendlyName: util.Optional[String]): Dataset = copy(friendlyName = friendlyName.toScala) - def withLabels(labels: Option[Map[String, String]]) = + def withLabels(labels: Option[Map[String, String]]): Dataset = copy(labels = labels) - def withLabels(labels: util.Optional[util.Map[String, String]]) = + def withLabels(labels: util.Optional[util.Map[String, String]]): Dataset = copy(labels = labels.toScala.map(_.asScala.toMap)) - def withLocation(location: util.Optional[String]) = + def withLocation(location: util.Optional[String]): Dataset = copy(location = location.toScala) } @@ -74,7 +75,7 @@ object Dataset { def create(datasetReference: DatasetReference, friendlyName: util.Optional[String], labels: util.Optional[util.Map[String, String]], - location: util.Optional[String]) = + location: util.Optional[String]): Dataset = Dataset(datasetReference, friendlyName.toScala, labels.toScala.map(_.asScala.toMap), location.toScala) implicit val format: RootJsonFormat[Dataset] = jsonFormat4(apply) @@ -89,17 +90,17 @@ object Dataset { */ final case class DatasetReference private[bigquery] (datasetId: Option[String], projectId: Option[String]) { - def getDatasetId = datasetId.toJava - def getProjectId = projectId.toJava + def getDatasetId: Optional[String] = datasetId.toJava + def getProjectId: 
Optional[String] = projectId.toJava - def withDatasetId(datasetId: Option[String]) = + def withDatasetId(datasetId: Option[String]): DatasetReference = copy(datasetId = datasetId) - def withDatasetId(datasetId: util.Optional[String]) = + def withDatasetId(datasetId: util.Optional[String]): DatasetReference = copy(datasetId = datasetId.toScala) - def withProjectId(projectId: Option[String]) = + def withProjectId(projectId: Option[String]): DatasetReference = copy(projectId = projectId) - def withProjectId(projectId: util.Optional[String]) = + def withProjectId(projectId: util.Optional[String]): DatasetReference = copy(projectId = projectId.toScala) } @@ -113,7 +114,7 @@ object DatasetReference { * @param projectId The ID of the project containing this dataset * @return a [[DatasetReference]] */ - def create(datasetId: util.Optional[String], projectId: util.Optional[String]) = + def create(datasetId: util.Optional[String], projectId: util.Optional[String]): DatasetReference = DatasetReference(datasetId.toScala, projectId.toScala) implicit val format: JsonFormat[DatasetReference] = jsonFormat2(apply) @@ -128,17 +129,17 @@ object DatasetReference { */ final case class DatasetListResponse private[bigquery] (nextPageToken: Option[String], datasets: Option[Seq[Dataset]]) { - def getNextPageToken = nextPageToken.toJava - def getDatasets = datasets.map(_.asJava).toJava + def getNextPageToken: Optional[String] = nextPageToken.toJava + def getDatasets: Optional[util.List[Dataset]] = datasets.map(_.asJava).toJava - def withNextPageToken(nextPageToken: Option[String]) = + def withNextPageToken(nextPageToken: Option[String]): DatasetListResponse = copy(nextPageToken = nextPageToken) - def withNextPageToken(nextPageToken: util.Optional[String]) = + def withNextPageToken(nextPageToken: util.Optional[String]): DatasetListResponse = copy(nextPageToken = nextPageToken.toScala) - def withDatasets(datasets: Option[Seq[Dataset]]) = + def withDatasets(datasets: Option[Seq[Dataset]]): DatasetListResponse = copy(datasets = datasets) - def withDatasets(datasets: util.Optional[util.List[Dataset]]) = + def withDatasets(datasets: util.Optional[util.List[Dataset]]): DatasetListResponse = copy(datasets = datasets.toScala.map(_.asScala.toList)) } @@ -152,7 +153,7 @@ object DatasetListResponse { * @param datasets an array of the dataset resources in the project * @return a [[DatasetListResponse]] */ - def create(nextPageToken: util.Optional[String], datasets: util.Optional[util.List[Dataset]]) = + def create(nextPageToken: util.Optional[String], datasets: util.Optional[util.List[Dataset]]): DatasetListResponse = DatasetListResponse(nextPageToken.toScala, datasets.toScala.map(_.asScala.toList)) implicit val format: RootJsonFormat[DatasetListResponse] = jsonFormat2(apply) diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProto.scala similarity index 76% rename from google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala rename to google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProto.scala index a454b0ae8..895b31506 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala +++ 
b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProto.scala @@ -20,7 +20,7 @@ import com.fasterxml.jackson.annotation.{ JsonCreator, JsonProperty } import spray.json.JsonFormat import java.util - +import java.util.Optional import scala.annotation.nowarn /** @@ -41,23 +41,23 @@ final case class ErrorProto private[bigquery] (reason: Option[String], location: @JsonProperty(value = "message") message: String) = this(Option(reason), Option(location), Option(message)) - def getReason = reason.toJava - def getLocation = location.toJava - def getMessage = message.toJava + def getReason: Optional[String] = reason.toJava + def getLocation: Optional[String] = location.toJava + def getMessage: Optional[String] = message.toJava - def withReason(reason: Option[String]) = + def withReason(reason: Option[String]): ErrorProto = copy(reason = reason) - def withReason(reason: util.Optional[String]) = + def withReason(reason: util.Optional[String]): ErrorProto = copy(reason = reason.toScala) - def withLocation(location: Option[String]) = + def withLocation(location: Option[String]): ErrorProto = copy(location = location) - def withLocation(location: util.Optional[String]) = + def withLocation(location: util.Optional[String]): ErrorProto = copy(location = location.toScala) - def withMessage(message: Option[String]) = + def withMessage(message: Option[String]): ErrorProto = copy(message = message) - def withMessage(message: util.Optional[String]) = + def withMessage(message: util.Optional[String]): ErrorProto = copy(message = message.toScala) } @@ -72,7 +72,8 @@ object ErrorProto { * @param message A human-readable description of the error * @return an [[ErrorProto]] */ - def create(reason: util.Optional[String], location: util.Optional[String], message: util.Optional[String]) = + def create( + reason: util.Optional[String], location: util.Optional[String], message: util.Optional[String]): ErrorProto = ErrorProto(reason.toScala, location.toScala, message.toScala) implicit val format: JsonFormat[ErrorProto] = jsonFormat3(apply) diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala index bdd04a5ab..831e2e0ae 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.{ JsonCreator, JsonProperty } import spray.json.{ JsonFormat, RootJsonFormat } import java.util - +import java.util.Optional import scala.annotation.nowarn import scala.collection.immutable.Seq @@ -37,23 +37,23 @@ final case class Job private[bigquery] (configuration: Option[JobConfiguration], jobReference: Option[JobReference], status: Option[JobStatus]) { - def getConfiguration = configuration.toJava - def getJobReference = jobReference.toJava - def getStatus = status.toJava + def getConfiguration: Optional[JobConfiguration] = configuration.toJava + def getJobReference: Optional[JobReference] = jobReference.toJava + def getStatus: Optional[JobStatus] = status.toJava - def withConfiguration(configuration: Option[JobConfiguration]) = + def withConfiguration(configuration: Option[JobConfiguration]): Job = copy(configuration = configuration) 
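// Illustrative sketch (not part of this patch): the pattern applied throughout
// these model-class hunks is explicit result types on every public member plus
// pekko.util.OptionConverters to bridge Scala Option and java.util.Optional in
// the Java API. The `Example` class below is hypothetical; only the imports and
// the .toJava / .toScala conversions are taken from the surrounding code. The
// "*-more-specific-type.backwards.excludes" MiMa filters added earlier in this
// patch are needed where a member now declares a more specific type than the
// previously inferred one.
import java.util
import java.util.Optional
import org.apache.pekko.util.OptionConverters._

final case class Example(name: Option[String]) {
  // Java API getter: explicit Optional return type instead of an inferred one
  def getName: Optional[String] = name.toJava

  // Scala and Java overloads of the same "wither", both with explicit results
  def withName(name: Option[String]): Example = copy(name = name)
  def withName(name: util.Optional[String]): Example = copy(name = name.toScala)
}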
- def withConfiguration(configuration: util.Optional[JobConfiguration]) = + def withConfiguration(configuration: util.Optional[JobConfiguration]): Job = copy(configuration = configuration.toScala) - def withJobReference(jobReference: Option[JobReference]) = + def withJobReference(jobReference: Option[JobReference]): Job = copy(jobReference = jobReference) - def withJobReference(jobReference: util.Optional[JobReference]) = + def withJobReference(jobReference: util.Optional[JobReference]): Job = copy(jobReference = jobReference.toScala) - def withStatus(status: Option[JobStatus]) = + def withStatus(status: Option[JobStatus]): Job = copy(status = status) - def withStatus(status: util.Optional[JobStatus]) = + def withStatus(status: util.Optional[JobStatus]): Job = copy(status = status.toScala) } @@ -70,7 +70,7 @@ object Job { */ def create(configuration: util.Optional[JobConfiguration], jobReference: util.Optional[JobReference], - status: util.Optional[JobStatus]) = + status: util.Optional[JobStatus]): Job = Job(configuration.toScala, jobReference.toScala, status.toScala) implicit val format: RootJsonFormat[Job] = jsonFormat3(apply) @@ -85,17 +85,17 @@ object Job { */ final case class JobConfiguration private[bigquery] (load: Option[JobConfigurationLoad], labels: Option[Map[String, String]]) { - def getLoad = load.toJava - def getLabels = labels.toJava + def getLoad: Optional[JobConfigurationLoad] = load.toJava + def getLabels: Optional[Map[String, String]] = labels.toJava - def withLoad(load: Option[JobConfigurationLoad]) = + def withLoad(load: Option[JobConfigurationLoad]): JobConfiguration = copy(load = load) - def withLoad(load: util.Optional[JobConfigurationLoad]) = + def withLoad(load: util.Optional[JobConfigurationLoad]): JobConfiguration = copy(load = load.toScala) - def withLabels(labels: Option[Map[String, String]]) = + def withLabels(labels: Option[Map[String, String]]): JobConfiguration = copy(labels = labels) - def withLabels(labels: util.Optional[util.Map[String, String]]) = + def withLabels(labels: util.Optional[util.Map[String, String]]): JobConfiguration = copy(labels = labels.toScala.map(_.asScala.toMap)) } @@ -118,7 +118,7 @@ object JobConfiguration { * @param load configures a load job * @return a [[JobConfiguration]] */ - def create(load: util.Optional[JobConfigurationLoad]) = + def create(load: util.Optional[JobConfigurationLoad]): JobConfiguration = JobConfiguration(load.toScala) /** @@ -129,7 +129,8 @@ object JobConfiguration { * @param labels the labels associated with this job * @return a [[JobConfiguration]] */ - def create(load: util.Optional[JobConfigurationLoad], labels: util.Optional[util.Map[String, String]]) = + def create( + load: util.Optional[JobConfigurationLoad], labels: util.Optional[util.Map[String, String]]): JobConfiguration = JobConfiguration(load.toScala, labels.toScala.map(_.asScala.toMap)) implicit val format: JsonFormat[JobConfiguration] = jsonFormat2(apply) @@ -151,35 +152,35 @@ final case class JobConfigurationLoad private[bigquery] (schema: Option[TableSch writeDisposition: Option[WriteDisposition], sourceFormat: Option[SourceFormat]) { - def getSchema = schema.toJava - def getDestinationTable = destinationTable.toJava - def getCreateDisposition = createDisposition.toJava - def getWriteDisposition = writeDisposition.toJava - def getSourceFormat = sourceFormat.toJava + def getSchema: Optional[TableSchema] = schema.toJava + def getDestinationTable: Optional[TableReference] = destinationTable.toJava + def getCreateDisposition: 
Optional[CreateDisposition] = createDisposition.toJava + def getWriteDisposition: Optional[WriteDisposition] = writeDisposition.toJava + def getSourceFormat: Optional[SourceFormat] = sourceFormat.toJava - def withSchema(schema: Option[TableSchema]) = + def withSchema(schema: Option[TableSchema]): JobConfigurationLoad = copy(schema = schema) - def withSchema(schema: util.Optional[TableSchema]) = + def withSchema(schema: util.Optional[TableSchema]): JobConfigurationLoad = copy(schema = schema.toScala) - def withDestinationTable(destinationTable: Option[TableReference]) = + def withDestinationTable(destinationTable: Option[TableReference]): JobConfigurationLoad = copy(destinationTable = destinationTable) - def withDestinationTable(destinationTable: util.Optional[TableReference]) = + def withDestinationTable(destinationTable: util.Optional[TableReference]): JobConfigurationLoad = copy(destinationTable = destinationTable.toScala) - def withCreateDisposition(createDisposition: Option[CreateDisposition]) = + def withCreateDisposition(createDisposition: Option[CreateDisposition]): JobConfigurationLoad = copy(createDisposition = createDisposition) - def withCreateDisposition(createDisposition: util.Optional[CreateDisposition]) = + def withCreateDisposition(createDisposition: util.Optional[CreateDisposition]): JobConfigurationLoad = copy(createDisposition = createDisposition.toScala) - def withWriteDisposition(writeDisposition: Option[WriteDisposition]) = + def withWriteDisposition(writeDisposition: Option[WriteDisposition]): JobConfigurationLoad = copy(writeDisposition = writeDisposition) - def withWriteDisposition(writeDisposition: util.Optional[WriteDisposition]) = + def withWriteDisposition(writeDisposition: util.Optional[WriteDisposition]): JobConfigurationLoad = copy(writeDisposition = writeDisposition.toScala) - def withSourceFormat(sourceFormat: Option[SourceFormat]) = + def withSourceFormat(sourceFormat: Option[SourceFormat]): JobConfigurationLoad = copy(sourceFormat = sourceFormat) - def withSourceFormat(sourceFormat: util.Optional[SourceFormat]) = + def withSourceFormat(sourceFormat: util.Optional[SourceFormat]): JobConfigurationLoad = copy(sourceFormat = sourceFormat.toScala) } @@ -200,7 +201,7 @@ object JobConfigurationLoad { destinationTable: util.Optional[TableReference], createDisposition: util.Optional[CreateDisposition], writeDisposition: util.Optional[WriteDisposition], - sourceFormat: util.Optional[SourceFormat]) = + sourceFormat: util.Optional[SourceFormat]): JobConfigurationLoad = JobConfigurationLoad( schema.toScala, destinationTable.toScala, @@ -217,13 +218,13 @@ object CreateDisposition { /** * Java API */ - def create(value: String) = CreateDisposition(value) + def create(value: String): CreateDisposition = CreateDisposition(value) - val CreateIfNeeded = CreateDisposition("CREATE_IF_NEEDED") - def createIfNeeded = CreateIfNeeded + val CreateIfNeeded: CreateDisposition = CreateDisposition("CREATE_IF_NEEDED") + def createIfNeeded: CreateDisposition = CreateIfNeeded - val CreateNever = CreateDisposition("CREATE_NEVER") - def createNever = CreateNever + val CreateNever: CreateDisposition = CreateDisposition("CREATE_NEVER") + def createNever: CreateDisposition = CreateNever implicit val format: JsonFormat[CreateDisposition] = StringEnum.jsonFormat(apply) } @@ -234,16 +235,16 @@ object WriteDisposition { /** * Java API */ - def create(value: String) = WriteDisposition(value) + def create(value: String): WriteDisposition = WriteDisposition(value) - val WriteTruncate = 
WriteDisposition("WRITE_TRUNCATE") - def writeTruncate = WriteTruncate + val WriteTruncate: WriteDisposition = WriteDisposition("WRITE_TRUNCATE") + def writeTruncate: WriteDisposition = WriteTruncate - val WriteAppend = WriteDisposition("WRITE_APPEND") - def writeAppend = WriteAppend + val WriteAppend: WriteDisposition = WriteDisposition("WRITE_APPEND") + def writeAppend: WriteDisposition = WriteAppend - val WriteEmpty = WriteDisposition("WRITE_EMPTY") - def writeEmpty = WriteEmpty + val WriteEmpty: WriteDisposition = WriteDisposition("WRITE_EMPTY") + def writeEmpty: WriteDisposition = WriteEmpty implicit val format: JsonFormat[WriteDisposition] = StringEnum.jsonFormat(apply) } @@ -254,10 +255,10 @@ object SourceFormat { /** * Java API */ - def create(value: String) = SourceFormat(value) + def create(value: String): SourceFormat = SourceFormat(value) - val NewlineDelimitedJsonFormat = SourceFormat("NEWLINE_DELIMITED_JSON") - def newlineDelimitedJsonFormat = NewlineDelimitedJsonFormat + val NewlineDelimitedJsonFormat: SourceFormat = SourceFormat("NEWLINE_DELIMITED_JSON") + def newlineDelimitedJsonFormat: SourceFormat = NewlineDelimitedJsonFormat implicit val format: JsonFormat[SourceFormat] = StringEnum.jsonFormat(apply) } @@ -280,23 +281,23 @@ final case class JobReference private[bigquery] (projectId: Option[String], jobI @JsonProperty("location") location: String) = this(Option(projectId), Option(jobId), Option(location)) - def getProjectId = projectId.toJava - def getJobId = jobId.toJava - def getLocation = location.toJava + def getProjectId: Optional[String] = projectId.toJava + def getJobId: Optional[String] = jobId.toJava + def getLocation: Optional[String] = location.toJava - def withProjectId(projectId: Option[String]) = + def withProjectId(projectId: Option[String]): JobReference = copy(projectId = projectId) - def withProjectId(projectId: util.Optional[String]) = + def withProjectId(projectId: util.Optional[String]): JobReference = copy(projectId = projectId.toScala) - def withJobId(jobId: Option[String]) = + def withJobId(jobId: Option[String]): JobReference = copy(jobId = jobId) - def withJobId(jobId: util.Optional[String]) = + def withJobId(jobId: util.Optional[String]): JobReference = copy(jobId = jobId.toScala) - def withLocation(location: Option[String]) = + def withLocation(location: Option[String]): JobReference = copy(location = location) - def withLocation(location: util.Optional[String]) = + def withLocation(location: util.Optional[String]): JobReference = copy(location = location.toScala) } @@ -311,7 +312,8 @@ object JobReference { * @param location the geographic location of the job * @return a [[JobReference]] */ - def create(projectId: util.Optional[String], jobId: util.Optional[String], location: util.Optional[String]) = + def create( + projectId: util.Optional[String], jobId: util.Optional[String], location: util.Optional[String]): JobReference = JobReference(projectId.toScala, jobId.toScala, location.toScala) implicit val format: JsonFormat[JobReference] = jsonFormat3(apply) @@ -328,21 +330,21 @@ object JobReference { final case class JobStatus private[bigquery] (errorResult: Option[ErrorProto], errors: Option[Seq[ErrorProto]], state: JobState) { - def getErrorResult = errorResult.toJava - def getErrors = errors.map(_.asJava).toJava - def getState = state + def getErrorResult: Optional[ErrorProto] = errorResult.toJava + def getErrors: Optional[util.List[ErrorProto]] = errors.map(_.asJava).toJava + def getState: JobState = state - def withErrorResult(errorResult: 
Option[ErrorProto]) = + def withErrorResult(errorResult: Option[ErrorProto]): JobStatus = copy(errorResult = errorResult) - def withErrorResult(errorResult: util.Optional[ErrorProto]) = + def withErrorResult(errorResult: util.Optional[ErrorProto]): JobStatus = copy(errorResult = errorResult.toScala) - def withErrors(errors: Option[Seq[ErrorProto]]) = + def withErrors(errors: Option[Seq[ErrorProto]]): JobStatus = copy(errors = errors) - def withErrors(errors: util.Optional[util.List[ErrorProto]]) = + def withErrors(errors: util.Optional[util.List[ErrorProto]]): JobStatus = copy(errors = errors.toScala.map(_.asScala.toList)) - def withState(state: JobState) = + def withState(state: JobState): JobStatus = copy(state = state) } @@ -357,7 +359,8 @@ object JobStatus { * @param state running state of the job * @return a [[JobStatus]] */ - def create(errorResult: util.Optional[ErrorProto], errors: util.Optional[util.List[ErrorProto]], state: JobState) = + def create(errorResult: util.Optional[ErrorProto], errors: util.Optional[util.List[ErrorProto]], state: JobState) + : JobStatus = JobStatus(errorResult.toScala, errors.toScala.map(_.asScala.toList), state) implicit val format: JsonFormat[JobStatus] = jsonFormat3(apply) @@ -369,23 +372,23 @@ object JobState { /** * Java API */ - def create(value: String) = JobState(value) + def create(value: String): JobState = JobState(value) - val Pending = JobState("PENDING") - def pending = Pending + val Pending: JobState = JobState("PENDING") + def pending: JobState = Pending - val Running = JobState("RUNNING") - def running = Running + val Running: JobState = JobState("RUNNING") + def running: JobState = Running - val Done = JobState("DONE") - def done = Done + val Done: JobState = JobState("DONE") + def done: JobState = Done implicit val format: JsonFormat[JobState] = StringEnum.jsonFormat(apply) } final case class JobCancelResponse private[bigquery] (job: Job) { - def getJob = job - def withJob(job: Job) = + def getJob: Job = job + def withJob(job: Job): JobCancelResponse = copy(job = job) } @@ -394,7 +397,7 @@ object JobCancelResponse { /** * Java API */ - def create(job: Job) = JobCancelResponse(job) + def create(job: Job): JobCancelResponse = JobCancelResponse(job) implicit val format: RootJsonFormat[JobCancelResponse] = jsonFormat1(apply) } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala index 7149d8225..9b6ec441f 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala @@ -24,6 +24,7 @@ import com.fasterxml.jackson.annotation.{ JsonCreator, JsonIgnoreProperties, Jso import spray.json.{ JsonFormat, RootJsonFormat, RootJsonReader } import java.time.Duration +import java.util.{ Optional, OptionalInt, OptionalLong } import java.{ lang, util } import scala.annotation.nowarn import scala.annotation.unchecked.uncheckedVariance @@ -56,63 +57,63 @@ final case class QueryRequest private[bigquery] (query: String, maximumBytesBilled: Option[Long], requestId: Option[String]) { - def getQuery = query - def getMaxResults = maxResults.toJavaPrimitive - def getDefaultDataset = defaultDataset.toJava - def getTimeout = timeout.map(_.asJava).toJava - def 
getDryRun = dryRun.map(lang.Boolean.valueOf).toJava - def getUseLegacySql = useLegacySql.map(lang.Boolean.valueOf).toJava - def getRequestId = requestId.toJava - def getLocation = location.toJava - def getMaximumBytesBilled = maximumBytesBilled.toJava - def getLabels = labels.toJava - - def withQuery(query: String) = + def getQuery: String = query + def getMaxResults: OptionalInt = maxResults.toJavaPrimitive + def getDefaultDataset: Optional[DatasetReference] = defaultDataset.toJava + def getTimeout: Optional[Duration] = timeout.map(_.asJava).toJava + def getDryRun: Optional[lang.Boolean] = dryRun.map(lang.Boolean.valueOf).toJava + def getUseLegacySql: Optional[lang.Boolean] = useLegacySql.map(lang.Boolean.valueOf).toJava + def getRequestId: Optional[String] = requestId.toJava + def getLocation: Optional[String] = location.toJava + def getMaximumBytesBilled: Optional[Long] = maximumBytesBilled.toJava + def getLabels: Optional[Map[String, String]] = labels.toJava + + def withQuery(query: String): QueryRequest = copy(query = query) - def withMaxResults(maxResults: Option[Int]) = + def withMaxResults(maxResults: Option[Int]): QueryRequest = copy(maxResults = maxResults) - def withMaxResults(maxResults: util.OptionalInt) = + def withMaxResults(maxResults: util.OptionalInt): QueryRequest = copy(maxResults = maxResults.toScala) - def withDefaultDataset(defaultDataset: Option[DatasetReference]) = + def withDefaultDataset(defaultDataset: Option[DatasetReference]): QueryRequest = copy(defaultDataset = defaultDataset) - def withDefaultDataset(defaultDataset: util.Optional[DatasetReference]) = + def withDefaultDataset(defaultDataset: util.Optional[DatasetReference]): QueryRequest = copy(defaultDataset = defaultDataset.toScala) - def withTimeout(timeout: Option[FiniteDuration]) = + def withTimeout(timeout: Option[FiniteDuration]): QueryRequest = copy(timeout = timeout) - def withTimeout(timeout: util.Optional[Duration]) = + def withTimeout(timeout: util.Optional[Duration]): QueryRequest = copy(timeout = timeout.toScala.map(_.asScala)) - def withDryRun(dryRun: Option[Boolean]) = + def withDryRun(dryRun: Option[Boolean]): QueryRequest = copy(dryRun = dryRun) - def withDryRun(dryRun: util.Optional[lang.Boolean]) = + def withDryRun(dryRun: util.Optional[lang.Boolean]): QueryRequest = copy(dryRun = dryRun.toScala.map(_.booleanValue)) - def withUseLegacySql(useLegacySql: Option[Boolean]) = + def withUseLegacySql(useLegacySql: Option[Boolean]): QueryRequest = copy(useLegacySql = useLegacySql) - def withUseLegacySql(useLegacySql: util.Optional[lang.Boolean]) = + def withUseLegacySql(useLegacySql: util.Optional[lang.Boolean]): QueryRequest = copy(useLegacySql = useLegacySql.toScala.map(_.booleanValue)) - def withRequestId(requestId: Option[String]) = + def withRequestId(requestId: Option[String]): QueryRequest = copy(requestId = requestId) - def withRequestId(requestId: util.Optional[String]) = + def withRequestId(requestId: util.Optional[String]): QueryRequest = copy(requestId = requestId.toScala) - def withLocation(location: Option[String]) = + def withLocation(location: Option[String]): QueryRequest = copy(location = location) - def withLocation(location: util.Optional[String]) = + def withLocation(location: util.Optional[String]): QueryRequest = copy(location = location.toScala) - def withMaximumBytesBilled(maximumBytesBilled: Option[Long]) = + def withMaximumBytesBilled(maximumBytesBilled: Option[Long]): QueryRequest = copy(maximumBytesBilled = maximumBytesBilled) - def 
withMaximumBytesBilled(maximumBytesBilled: util.OptionalLong) = + def withMaximumBytesBilled(maximumBytesBilled: util.OptionalLong): QueryRequest = copy(maximumBytesBilled = maximumBytesBilled.toScala) - def withLabels(labels: Option[Map[String, String]]) = + def withLabels(labels: Option[Map[String, String]]): QueryRequest = copy(labels = labels) - def withLabels(labels: util.Optional[util.Map[String, String]]) = + def withLabels(labels: util.Optional[util.Map[String, String]]): QueryRequest = copy(labels = labels.toScala.map(_.asScala.toMap)) } @@ -146,7 +147,7 @@ object QueryRequest { timeout: util.Optional[Duration], dryRun: util.Optional[lang.Boolean], useLegacySql: util.Optional[lang.Boolean], - requestId: util.Optional[String]) = + requestId: util.Optional[String]): QueryRequest = QueryRequest( query, maxResults.toScala, @@ -226,61 +227,61 @@ final case class QueryResponse[+T] private[bigquery] (schema: Option[TableSchema Option(cacheHit).map(_.booleanValue), Option(numDmlAffectedRows).map(_.toLong)) - def getSchema = schema.toJava - def getJobReference = jobReference - def getTotalRows = totalRows.toJavaPrimitive - def getPageToken = pageToken.toJava + def getSchema: Optional[TableSchema] = schema.toJava + def getJobReference: JobReference = jobReference + def getTotalRows: OptionalLong = totalRows.toJavaPrimitive + def getPageToken: Optional[String] = pageToken.toJava def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).toJava - def getTotalBytesProcessed = totalBytesProcessed.toJavaPrimitive - def getJobComplete = jobComplete - def getErrors = errors.map(_.asJava).toJava - def getCacheHit = cacheHit.map(lang.Boolean.valueOf).toJava - def getNumDmlAffectedRows = numDmlAffectedRows.toJavaPrimitive + def getTotalBytesProcessed: OptionalLong = totalBytesProcessed.toJavaPrimitive + def getJobComplete: Boolean = jobComplete + def getErrors: Optional[util.List[ErrorProto]] = errors.map(_.asJava).toJava + def getCacheHit: Optional[lang.Boolean] = cacheHit.map(lang.Boolean.valueOf).toJava + def getNumDmlAffectedRows: OptionalLong = numDmlAffectedRows.toJavaPrimitive - def withSchema(schema: Option[TableSchema]) = + def withSchema(schema: Option[TableSchema]): QueryResponse[T] = copy(schema = schema) - def withSchema(schema: util.Optional[TableSchema]) = + def withSchema(schema: util.Optional[TableSchema]): QueryResponse[T] = copy(schema = schema.toScala) - def withJobReference(jobReference: JobReference) = + def withJobReference(jobReference: JobReference): QueryResponse[T] = copy(jobReference = jobReference) - def withTotalRows(totalRows: Option[Long]) = + def withTotalRows(totalRows: Option[Long]): QueryResponse[T] = copy(totalRows = totalRows) - def withTotalRows(totalRows: util.OptionalLong) = + def withTotalRows(totalRows: util.OptionalLong): QueryResponse[T] = copy(totalRows = totalRows.toScala) - def withPageToken(pageToken: Option[String]) = + def withPageToken(pageToken: Option[String]): QueryResponse[T] = copy(pageToken = pageToken) - def withPageToken(pageToken: util.Optional[String]) = + def withPageToken(pageToken: util.Optional[String]): QueryResponse[T] = copy(pageToken = pageToken.toScala) - def withRows[S >: T](rows: Option[Seq[S]]) = + def withRows[S >: T](rows: Option[Seq[S]]): QueryResponse[S] = copy(rows = rows) - def withRows(rows: util.Optional[util.List[T] @uncheckedVariance]) = + def withRows(rows: util.Optional[util.List[T] @uncheckedVariance]): QueryResponse[T] = copy(rows = rows.toScala.map(_.asScala.toList)) - def 
withTotalBytesProcessed(totalBytesProcessed: Option[Long]) = + def withTotalBytesProcessed(totalBytesProcessed: Option[Long]): QueryResponse[T] = copy(totalBytesProcessed = totalBytesProcessed) - def withTotalBytesProcessed(totalBytesProcessed: util.OptionalLong) = + def withTotalBytesProcessed(totalBytesProcessed: util.OptionalLong): QueryResponse[T] = copy(totalBytesProcessed = totalBytesProcessed.toScala) - def withJobComplete(jobComplete: Boolean) = + def withJobComplete(jobComplete: Boolean): QueryResponse[T] = copy(jobComplete = jobComplete) - def withErrors(errors: Option[Seq[ErrorProto]]) = + def withErrors(errors: Option[Seq[ErrorProto]]): QueryResponse[T] = copy(errors = errors) - def withErrors(errors: util.Optional[util.List[ErrorProto]]) = + def withErrors(errors: util.Optional[util.List[ErrorProto]]): QueryResponse[T] = copy(errors = errors.toScala.map(_.asScala.toList)) - def withCacheHit(cacheHit: Option[Boolean]) = + def withCacheHit(cacheHit: Option[Boolean]): QueryResponse[T] = copy(cacheHit = cacheHit) - def withCacheHit(cacheHit: util.Optional[lang.Boolean]) = + def withCacheHit(cacheHit: util.Optional[lang.Boolean]): QueryResponse[T] = copy(cacheHit = cacheHit.toScala.map(_.booleanValue)) - def withNumDmlAffectedRows(numDmlAffectedRows: Option[Long]) = + def withNumDmlAffectedRows(numDmlAffectedRows: Option[Long]): QueryResponse[T] = copy(numDmlAffectedRows = numDmlAffectedRows) - def withNumDmlAffectedRows(numDmlAffectedRows: util.OptionalLong) = + def withNumDmlAffectedRows(numDmlAffectedRows: util.OptionalLong): QueryResponse[T] = copy(numDmlAffectedRows = numDmlAffectedRows.toScala) } @@ -313,7 +314,7 @@ object QueryResponse { jobComplete: Boolean, errors: util.Optional[util.List[ErrorProto]], cacheHit: util.Optional[lang.Boolean], - numDmlAffectedRows: util.OptionalLong) = + numDmlAffectedRows: util.OptionalLong): QueryResponse[T] = QueryResponse[T]( schema.toScala, jobReference, diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala index 1ca69cc74..3fadfe318 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala @@ -24,6 +24,7 @@ import com.fasterxml.jackson.annotation._ import spray.json.{ JsonFormat, RootJsonFormat, RootJsonReader, RootJsonWriter } import java.{ lang, util } +import java.util.Optional import scala.annotation.nowarn import scala.annotation.unchecked.uncheckedVariance import scala.collection.immutable.Seq @@ -48,21 +49,21 @@ final case class TableDataListResponse[+T] private[bigquery] (totalRows: Long, p @JsonProperty("rows") rows: util.List[T]) = this(totalRows.toLong, Option(pageToken), Option(rows).map(_.asScala.toList)) - def getTotalRows = totalRows - def getPageToken = pageToken.toJava + def getTotalRows: Long = totalRows + def getPageToken: Optional[String] = pageToken.toJava def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).toJava - def withTotalRows(totalRows: Long) = + def withTotalRows(totalRows: Long): TableDataListResponse[T] = copy(totalRows = totalRows) - def withPageToken(pageToken: Option[String]) = + def withPageToken(pageToken: Option[String]): TableDataListResponse[T] = 
copy(pageToken = pageToken) - def withPageToken(pageToken: util.Optional[String]) = + def withPageToken(pageToken: util.Optional[String]): TableDataListResponse[T] = copy(pageToken = pageToken.toScala) - def withRows[S >: T](rows: Option[Seq[S]]) = + def withRows[S >: T](rows: Option[Seq[S]]): TableDataListResponse[S] = copy(rows = rows) - def withRows(rows: util.Optional[util.List[T] @uncheckedVariance]) = + def withRows(rows: util.Optional[util.List[T] @uncheckedVariance]): TableDataListResponse[T] = copy(rows = rows.toScala.map(_.asScala.toList)) } @@ -78,7 +79,8 @@ object TableDataListResponse { * @tparam T the data model of each row * @return a [[TableDataListResponse]] */ - def create[T](totalRows: Long, pageToken: util.Optional[String], rows: util.Optional[util.List[T]]) = + def create[T]( + totalRows: Long, pageToken: util.Optional[String], rows: util.Optional[util.List[T]]): TableDataListResponse[T] = TableDataListResponse(totalRows, pageToken.toScala, rows.toScala.map(_.asScala.toList)) implicit def reader[T <: AnyRef]( @@ -105,9 +107,9 @@ final case class TableDataInsertAllRequest[+T] private[bigquery] (skipInvalidRow templateSuffix: Option[String], rows: Seq[Row[T]]) { - @JsonIgnore def getSkipInvalidRows = skipInvalidRows.map(lang.Boolean.valueOf).toJava - @JsonIgnore def getIgnoreUnknownValues = ignoreUnknownValues.map(lang.Boolean.valueOf).toJava - @JsonIgnore def getTemplateSuffix = templateSuffix.toJava + @JsonIgnore def getSkipInvalidRows: Optional[lang.Boolean] = skipInvalidRows.map(lang.Boolean.valueOf).toJava + @JsonIgnore def getIgnoreUnknownValues: Optional[lang.Boolean] = ignoreUnknownValues.map(lang.Boolean.valueOf).toJava + @JsonIgnore def getTemplateSuffix: Optional[String] = templateSuffix.toJava def getRows: util.List[Row[T] @uncheckedVariance] = rows.asJava @nowarn("msg=never used") @@ -120,24 +122,24 @@ final case class TableDataInsertAllRequest[+T] private[bigquery] (skipInvalidRow @JsonGetter("templateSuffix") private def templateSuffixOrNull = templateSuffix.orNull - def withSkipInvalidRows(skipInvalidRows: Option[Boolean]) = + def withSkipInvalidRows(skipInvalidRows: Option[Boolean]): TableDataInsertAllRequest[T] = copy(skipInvalidRows = skipInvalidRows) - def withSkipInvalidRows(skipInvalidRows: util.Optional[lang.Boolean]) = + def withSkipInvalidRows(skipInvalidRows: util.Optional[lang.Boolean]): TableDataInsertAllRequest[T] = copy(skipInvalidRows = skipInvalidRows.toScala.map(_.booleanValue)) - def withIgnoreUnknownValues(ignoreUnknownValues: Option[Boolean]) = + def withIgnoreUnknownValues(ignoreUnknownValues: Option[Boolean]): TableDataInsertAllRequest[T] = copy(ignoreUnknownValues = ignoreUnknownValues) - def withIgnoreUnknownValues(ignoreUnknownValues: util.Optional[lang.Boolean]) = + def withIgnoreUnknownValues(ignoreUnknownValues: util.Optional[lang.Boolean]): TableDataInsertAllRequest[T] = copy(ignoreUnknownValues = ignoreUnknownValues.toScala.map(_.booleanValue)) - def withTemplateSuffix(templateSuffix: Option[String]) = + def withTemplateSuffix(templateSuffix: Option[String]): TableDataInsertAllRequest[T] = copy(templateSuffix = templateSuffix) - def withTemplateSuffix(templateSuffix: util.Optional[String]) = + def withTemplateSuffix(templateSuffix: util.Optional[String]): TableDataInsertAllRequest[T] = copy(templateSuffix = templateSuffix.toScala) - def withRows[S >: T](rows: Seq[Row[S]]) = + def withRows[S >: T](rows: Seq[Row[S]]): TableDataInsertAllRequest[S] = copy(rows = rows) - def withRows(rows: util.List[Row[T] @uncheckedVariance]) = + 
def withRows(rows: util.List[Row[T] @uncheckedVariance]): TableDataInsertAllRequest[T] = copy(rows = rows.asScala.toList) } @@ -157,7 +159,7 @@ object TableDataInsertAllRequest { def create[T](skipInvalidRows: util.Optional[lang.Boolean], ignoreUnknownValues: util.Optional[lang.Boolean], templateSuffix: util.Optional[String], - rows: util.List[Row[T]]) = + rows: util.List[Row[T]]): TableDataInsertAllRequest[T] = TableDataInsertAllRequest( skipInvalidRows.toScala.map(_.booleanValue), ignoreUnknownValues.toScala.map(_.booleanValue), @@ -182,12 +184,12 @@ object TableDataInsertAllRequest { */ final case class Row[+T] private[bigquery] (insertId: Option[String], json: T) { - def getInsertId = insertId.toJava - def getJson = json + def getInsertId: Optional[String] = insertId.toJava + def getJson: T = json - def withInsertId(insertId: Option[String]) = + def withInsertId(insertId: Option[String]): Row[T] = copy(insertId = insertId) - def withInsertId(insertId: util.Optional[String]) = + def withInsertId(insertId: util.Optional[String]): Row[T] = copy(insertId = insertId.toScala) def withJson[U >: T](json: U): Row[U] = @@ -205,7 +207,7 @@ object Row { * @tparam T the data model of the record * @return a [[Row]] */ - def create[T](insertId: util.Optional[String], json: T) = + def create[T](insertId: util.Optional[String], json: T): Row[T] = Row(insertId.toScala, json) } @@ -214,12 +216,12 @@ object Row { * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ final case class TableDataInsertAllResponse private[bigquery] (insertErrors: Option[Seq[InsertError]]) { - def getInsertErrors = insertErrors.map(_.asJava).toJava + def getInsertErrors: Optional[util.List[InsertError]] = insertErrors.map(_.asJava).toJava - def withInsertErrors(insertErrors: Option[Seq[InsertError]]) = + def withInsertErrors(insertErrors: Option[Seq[InsertError]]): TableDataInsertAllResponse = copy(insertErrors = insertErrors) - def withInsertErrors(insertErrors: util.Optional[util.List[InsertError]]) = + def withInsertErrors(insertErrors: util.Optional[util.List[InsertError]]): TableDataInsertAllResponse = copy(insertErrors = insertErrors.toScala.map(_.asScala.toList)) } @@ -229,7 +231,7 @@ object TableDataInsertAllResponse { * Java API: TableDataInsertAllResponse model * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ - def create(insertErrors: util.Optional[util.List[InsertError]]) = + def create(insertErrors: util.Optional[util.List[InsertError]]): TableDataInsertAllResponse = TableDataInsertAllResponse(insertErrors.toScala.map(_.asScala.toList)) implicit val format: RootJsonFormat[TableDataInsertAllResponse] = @@ -241,15 +243,15 @@ object TableDataInsertAllResponse { * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ final case class InsertError private[bigquery] (index: Int, errors: Option[Seq[ErrorProto]]) { - def getIndex = index - def getErrors = errors.map(_.asJava).toJava + def getIndex: Int = index + def getErrors: Optional[util.List[ErrorProto]] = errors.map(_.asJava).toJava - def withIndex(index: Int) = + def withIndex(index: Int): InsertError = copy(index = index) - def withErrors(errors: Option[Seq[ErrorProto]]) = + def withErrors(errors: Option[Seq[ErrorProto]]): InsertError = copy(errors = errors) - def withErrors(errors: util.Optional[util.List[ErrorProto]]) = + def withErrors(errors: 
util.Optional[util.List[ErrorProto]]): InsertError = copy(errors = errors.toScala.map(_.asScala.toList)) } @@ -259,7 +261,7 @@ object InsertError { * Java API: InsertError model * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ - def create(index: Int, errors: util.Optional[util.List[ErrorProto]]) = + def create(index: Int, errors: util.Optional[util.List[ErrorProto]]): InsertError = InsertError(index, errors.toScala.map(_.asScala.toList)) implicit val format: JsonFormat[InsertError] = jsonFormat2(apply) diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala index 60bdae7f3..731c29968 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala @@ -22,7 +22,7 @@ import com.fasterxml.jackson.annotation.{ JsonCreator, JsonProperty } import spray.json.{ JsonFormat, RootJsonFormat } import java.util - +import java.util.{ Optional, OptionalInt, OptionalLong } import scala.annotation.nowarn import scala.annotation.varargs import scala.collection.immutable.Seq @@ -43,33 +43,33 @@ final case class Table private[bigquery] (tableReference: TableReference, numRows: Option[Long], location: Option[String]) { - def getTableReference = tableReference - def getLabels = labels.map(_.asJava).toJava - def getSchema = schema.toJava - def getNumRows = numRows.toJavaPrimitive - def getLocation = location.toJava + def getTableReference: TableReference = tableReference + def getLabels: Optional[util.Map[String, String]] = labels.map(_.asJava).toJava + def getSchema: Optional[TableSchema] = schema.toJava + def getNumRows: OptionalLong = numRows.toJavaPrimitive + def getLocation: Optional[String] = location.toJava - def withTableReference(tableReference: TableReference) = + def withTableReference(tableReference: TableReference): Table = copy(tableReference = tableReference) - def withLabels(labels: Option[Map[String, String]]) = + def withLabels(labels: Option[Map[String, String]]): Table = copy(labels = labels) - def withLabels(labels: util.Optional[util.Map[String, String]]) = + def withLabels(labels: util.Optional[util.Map[String, String]]): Table = copy(labels = labels.toScala.map(_.asScala.toMap)) - def withSchema(schema: Option[TableSchema]) = + def withSchema(schema: Option[TableSchema]): Table = copy(schema = schema) - def withSchema(schema: util.Optional[TableSchema]) = + def withSchema(schema: util.Optional[TableSchema]): Table = copy(schema = schema.toScala) - def withNumRows(numRows: Option[Long]) = + def withNumRows(numRows: Option[Long]): Table = copy(numRows = numRows) - def withNumRows(numRows: util.OptionalLong) = + def withNumRows(numRows: util.OptionalLong): Table = copy(numRows = numRows.toScala) - def withLocation(location: Option[String]) = + def withLocation(location: Option[String]): Table = copy(location = location) - def withLocation(location: util.Optional[String]) = + def withLocation(location: util.Optional[String]): Table = copy(location = location.toScala) } @@ -90,7 +90,7 @@ object Table { labels: util.Optional[util.Map[String, String]], schema: util.Optional[TableSchema], numRows: util.OptionalLong, - 
location: util.Optional[String]) = + location: util.Optional[String]): Table = Table( tableReference, labels.toScala.map(_.asScala.toMap), @@ -112,21 +112,21 @@ object Table { final case class TableReference private[bigquery] (projectId: Option[String], datasetId: String, tableId: Option[String]) { - def getProjectId = projectId.toJava - def getDatasetId = datasetId - def getTableId = tableId + def getProjectId: Optional[String] = projectId.toJava + def getDatasetId: String = datasetId + def getTableId: Option[String] = tableId - def withProjectId(projectId: Option[String]) = + def withProjectId(projectId: Option[String]): TableReference = copy(projectId = projectId) - def withProjectId(projectId: util.Optional[String]) = + def withProjectId(projectId: util.Optional[String]): TableReference = copy(projectId = projectId.toScala) - def withDatasetId(datasetId: String) = + def withDatasetId(datasetId: String): TableReference = copy(datasetId = datasetId) - def withTableId(tableId: Option[String]) = + def withTableId(tableId: Option[String]): TableReference = copy(tableId = tableId) - def withTableId(tableId: util.Optional[String]) = + def withTableId(tableId: util.Optional[String]): TableReference = copy(tableId = tableId.toScala) } @@ -141,7 +141,7 @@ object TableReference { * @param tableId the ID of the table * @return a [[TableReference]] */ - def create(projectId: util.Optional[String], datasetId: String, tableId: util.Optional[String]) = + def create(projectId: util.Optional[String], datasetId: String, tableId: util.Optional[String]): TableReference = TableReference(projectId.toScala, datasetId, tableId.toScala) implicit val referenceFormat: JsonFormat[TableReference] = jsonFormat3(apply) @@ -160,11 +160,11 @@ final case class TableSchema private[bigquery] (fields: Seq[TableFieldSchema]) { private def this(@JsonProperty(value = "fields", required = true) fields: util.List[TableFieldSchema]) = this(fields.asScala.toList) - def getFields = fields.asJava + def getFields: util.List[TableFieldSchema] = fields.asJava - def withFields(fields: Seq[TableFieldSchema]) = + def withFields(fields: Seq[TableFieldSchema]): TableSchema = copy(fields = fields) - def withFields(fields: util.List[TableFieldSchema]) = + def withFields(fields: util.List[TableFieldSchema]): TableSchema = copy(fields = fields.asScala.toList) } @@ -177,7 +177,7 @@ object TableSchema { * @param fields describes the fields in a table * @return a [[TableSchema]] */ - def create(fields: util.List[TableFieldSchema]) = TableSchema(fields.asScala.toList) + def create(fields: util.List[TableFieldSchema]): TableSchema = TableSchema(fields.asScala.toList) /** * Java API: Schema of a table @@ -187,7 +187,7 @@ object TableSchema { * @return a [[TableSchema]] */ @varargs - def create(fields: TableFieldSchema*) = TableSchema(fields.toList) + def create(fields: TableFieldSchema*): TableSchema = TableSchema(fields.toList) implicit val format: JsonFormat[TableSchema] = jsonFormat1(apply) } @@ -218,25 +218,25 @@ final case class TableFieldSchema private[bigquery] (name: String, Option(mode).map(TableFieldSchemaMode.apply), Option(fields).map(_.asScala.toList)) - def getName = name - def getType = `type` - def getMode = mode.toJava - def getFields = fields.map(_.asJava).toJava + def getName: String = name + def getType: TableFieldSchemaType = `type` + def getMode: Optional[TableFieldSchemaMode] = mode.toJava + def getFields: Optional[util.List[TableFieldSchema]] = fields.map(_.asJava).toJava - def withName(name: String) = + def withName(name: 
String): TableFieldSchema = copy(name = name) - def withType(`type`: TableFieldSchemaType) = + def withType(`type`: TableFieldSchemaType): TableFieldSchema = copy(`type` = `type`) - def withMode(mode: Option[TableFieldSchemaMode]) = + def withMode(mode: Option[TableFieldSchemaMode]): TableFieldSchema = copy(mode = mode) - def withMode(mode: util.Optional[TableFieldSchemaMode]) = + def withMode(mode: util.Optional[TableFieldSchemaMode]): TableFieldSchema = copy(mode = mode.toScala) - def withFields(fields: Option[Seq[TableFieldSchema]]) = + def withFields(fields: Option[Seq[TableFieldSchema]]): TableFieldSchema = copy(fields = fields) - def withFields(fields: util.Optional[util.List[TableFieldSchema]]) = + def withFields(fields: util.Optional[util.List[TableFieldSchema]]): TableFieldSchema = copy(fields = fields.toScala.map(_.asScala.toList)) } @@ -255,7 +255,7 @@ object TableFieldSchema { def create(name: String, `type`: TableFieldSchemaType, mode: util.Optional[TableFieldSchemaMode], - fields: util.Optional[util.List[TableFieldSchema]]) = + fields: util.Optional[util.List[TableFieldSchema]]): TableFieldSchema = TableFieldSchema(name, `type`, mode.toScala, fields.toScala.map(_.asScala.toList)) /** @@ -272,7 +272,7 @@ object TableFieldSchema { def create(name: String, `type`: TableFieldSchemaType, mode: util.Optional[TableFieldSchemaMode], - fields: TableFieldSchema*) = + fields: TableFieldSchema*): TableFieldSchema = TableFieldSchema(name, `type`, mode.toScala, if (fields.nonEmpty) Some(fields.toList) else None) implicit val format: JsonFormat[TableFieldSchema] = lazyFormat( @@ -285,46 +285,46 @@ object TableFieldSchemaType { /** * Java API */ - def create(value: String) = TableFieldSchemaType(value) + def create(value: String): TableFieldSchemaType = TableFieldSchemaType(value) - val String = TableFieldSchemaType("STRING") - def string = String + val String: TableFieldSchemaType = TableFieldSchemaType("STRING") + def string: TableFieldSchemaType = String - val Bytes = TableFieldSchemaType("BYTES") - def bytes = Bytes + val Bytes: TableFieldSchemaType = TableFieldSchemaType("BYTES") + def bytes: TableFieldSchemaType = Bytes - val Integer = TableFieldSchemaType("INTEGER") - def integer = Integer + val Integer: TableFieldSchemaType = TableFieldSchemaType("INTEGER") + def integer: TableFieldSchemaType = Integer - val Float = TableFieldSchemaType("FLOAT") - def float64 = Float // float is a reserved keyword in Java + val Float: TableFieldSchemaType = TableFieldSchemaType("FLOAT") + def float64: TableFieldSchemaType = Float // float is a reserved keyword in Java - val Boolean = TableFieldSchemaType("BOOLEAN") - def bool = Boolean // boolean is a reserved keyword in Java + val Boolean: TableFieldSchemaType = TableFieldSchemaType("BOOLEAN") + def bool: TableFieldSchemaType = Boolean // boolean is a reserved keyword in Java - val Timestamp = TableFieldSchemaType("TIMESTAMP") - def timestamp = Timestamp + val Timestamp: TableFieldSchemaType = TableFieldSchemaType("TIMESTAMP") + def timestamp: TableFieldSchemaType = Timestamp - val Date = TableFieldSchemaType("DATE") - def date = Date + val Date: TableFieldSchemaType = TableFieldSchemaType("DATE") + def date: TableFieldSchemaType = Date - val Time = TableFieldSchemaType("TIME") - def time = Time + val Time: TableFieldSchemaType = TableFieldSchemaType("TIME") + def time: TableFieldSchemaType = Time - val DateTime = TableFieldSchemaType("DATETIME") - def dateTime = DateTime + val DateTime: TableFieldSchemaType = TableFieldSchemaType("DATETIME") + def 
dateTime: TableFieldSchemaType = DateTime - val Geography = TableFieldSchemaType("GEOGRAPHY") - def geography = Geography + val Geography: TableFieldSchemaType = TableFieldSchemaType("GEOGRAPHY") + def geography: TableFieldSchemaType = Geography - val Numeric = TableFieldSchemaType("NUMERIC") - def numeric = Numeric + val Numeric: TableFieldSchemaType = TableFieldSchemaType("NUMERIC") + def numeric: TableFieldSchemaType = Numeric - val BigNumeric = TableFieldSchemaType("BIGNUMERIC") - def bigNumeric = BigNumeric + val BigNumeric: TableFieldSchemaType = TableFieldSchemaType("BIGNUMERIC") + def bigNumeric: TableFieldSchemaType = BigNumeric - val Record = TableFieldSchemaType("RECORD") - def record = Record + val Record: TableFieldSchemaType = TableFieldSchemaType("RECORD") + def record: TableFieldSchemaType = Record implicit val format: JsonFormat[TableFieldSchemaType] = StringEnum.jsonFormat(apply) } @@ -335,16 +335,16 @@ object TableFieldSchemaMode { /** * Java API */ - def create(value: String) = TableFieldSchemaMode(value) + def create(value: String): TableFieldSchemaMode = TableFieldSchemaMode(value) - val Nullable = TableFieldSchemaMode("NULLABLE") - def nullable = Nullable + val Nullable: TableFieldSchemaMode = TableFieldSchemaMode("NULLABLE") + def nullable: TableFieldSchemaMode = Nullable - val Required = TableFieldSchemaMode("REQUIRED") - def required = Required + val Required: TableFieldSchemaMode = TableFieldSchemaMode("REQUIRED") + def required: TableFieldSchemaMode = Required - val Repeated = TableFieldSchemaMode("REPEATED") - def repeated = Repeated + val Repeated: TableFieldSchemaMode = TableFieldSchemaMode("REPEATED") + def repeated: TableFieldSchemaMode = Repeated implicit val format: JsonFormat[TableFieldSchemaMode] = StringEnum.jsonFormat(apply) } @@ -361,15 +361,15 @@ final case class TableListResponse private[bigquery] (nextPageToken: Option[Stri tables: Option[Seq[Table]], totalItems: Option[Int]) { - def getNextPageToken = nextPageToken.toJava - def getTables = tables.map(_.asJava).toJava - def getTotalItems = totalItems.toJavaPrimitive + def getNextPageToken: Optional[String] = nextPageToken.toJava + def getTables: Optional[util.List[Table]] = tables.map(_.asJava).toJava + def getTotalItems: OptionalInt = totalItems.toJavaPrimitive - def withNextPageToken(nextPageToken: util.Optional[String]) = + def withNextPageToken(nextPageToken: util.Optional[String]): TableListResponse = copy(nextPageToken = nextPageToken.toScala) - def withTables(tables: util.Optional[util.List[Table]]) = + def withTables(tables: util.Optional[util.List[Table]]): TableListResponse = copy(tables = tables.toScala.map(_.asScala.toList)) - def withTotalItems(totalItems: util.OptionalInt) = + def withTotalItems(totalItems: util.OptionalInt): TableListResponse = copy(totalItems = totalItems.toScala) } @@ -386,7 +386,7 @@ object TableListResponse { */ def createTableListResponse(nextPageToken: util.Optional[String], tables: util.Optional[util.List[Table]], - totalItems: util.OptionalInt) = + totalItems: util.OptionalInt): TableListResponse = TableListResponse(nextPageToken.toScala, tables.toScala.map(_.asScala.toList), totalItems.toScala) implicit val format: RootJsonFormat[TableListResponse] = jsonFormat3(apply) diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala index 0f45b05f5..b1dc92b60 
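The model classes above now spell out result types on every Java-facing getter and factory. A minimal usage sketch of the typed schema builders, assuming the signatures shown in this hunk (the field names are placeholders, and the toJava/toScala plumbing may come from scala.jdk.OptionConverters or an equivalent Pekko shim):

import java.util.Optional
import org.apache.pekko.stream.connectors.googlecloud.bigquery.model.{
  TableFieldSchema, TableFieldSchemaMode, TableFieldSchemaType, TableSchema
}
import scala.jdk.OptionConverters._

// Two-column schema built through the Java API factories typed above.
val schema: TableSchema = TableSchema.create(
  TableFieldSchema.create("name", TableFieldSchemaType.string, Optional.of(TableFieldSchemaMode.required)),
  TableFieldSchema.create("age", TableFieldSchemaType.integer, Optional.of(TableFieldSchemaMode.nullable)))

// The Option <-> Optional conversions behind the getters and with* copies above.
val mode: Optional[TableFieldSchemaMode] = Option(TableFieldSchemaMode.required).toJava
val back: Option[TableFieldSchemaMode] = mode.toScala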
100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala @@ -37,7 +37,7 @@ import pekko.stream.scaladsl.{ Flow, Keep, Sink, Source } import java.util.{ SplittableRandom, UUID } import scala.collection.immutable.Seq -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContext, Future } private[scaladsl] trait BigQueryTableData { this: BigQueryRest => @@ -129,7 +129,7 @@ private[scaladsl] trait BigQueryTableData { this: BigQueryRest => import BigQueryException._ import SprayJsonSupport._ implicit val system: ActorSystem = mat.system - implicit val ec = ExecutionContexts.parasitic + implicit val ec: ExecutionContext = ExecutionContexts.parasitic implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val uri = BigQueryEndpoints.tableDataInsertAll(settings.projectId, datasetId, tableId) diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/PrimitiveSchemaWriter.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/PrimitiveSchemaWriter.scala index b4c469b16..f41a7f9e8 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/PrimitiveSchemaWriter.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/PrimitiveSchemaWriter.scala @@ -21,8 +21,7 @@ import org.apache.pekko.stream.connectors.googlecloud.bigquery.model.{ private[schema] final class PrimitiveSchemaWriter[T](`type`: TableFieldSchemaType) extends SchemaWriter[T] { - override def write(name: String, mode: TableFieldSchemaMode): TableFieldSchema = { + override def write(name: String, mode: TableFieldSchemaMode): TableFieldSchema = TableFieldSchema(name, `type`, Some(mode), None) - } } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryBasicFormats.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryBasicFormats.scala index be569f7dd..591f75027 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryBasicFormats.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryBasicFormats.scala @@ -22,8 +22,8 @@ import spray.json.{ deserializationError, JsBoolean, JsFalse, JsNumber, JsString trait BigQueryBasicFormats { implicit object IntJsonFormat extends BigQueryJsonFormat[Int] { - def write(x: Int) = JsNumber(x) - def read(value: JsValue) = value match { + def write(x: Int): JsNumber = JsNumber(x) + def read(value: JsValue): Int = value match { case JsNumber(x) if x.isValidInt => x.intValue case BigQueryNumber(x) if x.isValidInt => x.intValue case x => deserializationError("Expected Int as JsNumber or JsString, but got " + x) @@ -31,12 +31,12 @@ trait BigQueryBasicFormats { } implicit object LongJsonFormat extends BigQueryJsonFormat[Long] { - def write(x: Long) = + def write(x: Long): JsValue = if (-9007199254740991L <= x & x <= 9007199254740991L) JsNumber(x) else JsString(x.toString) - def read(value: JsValue) = value match { + def read(value: 
JsValue): Long = value match { case JsNumber(x) if x.isValidLong => x.longValue case BigQueryNumber(x) if x.isValidLong => x.longValue case x => deserializationError("Expected Long as JsNumber or JsString, but got " + x) @@ -44,8 +44,8 @@ trait BigQueryBasicFormats { } implicit object FloatJsonFormat extends BigQueryJsonFormat[Float] { - def write(x: Float) = JsNumber(x) - def read(value: JsValue) = value match { + def write(x: Float): JsValue = JsNumber(x) + def read(value: JsValue): Float = value match { case JsNumber(x) => x.floatValue case BigQueryNumber(x) => x.floatValue case x => deserializationError("Expected Float as JsNumber or JsString, but got " + x) @@ -53,8 +53,8 @@ trait BigQueryBasicFormats { } implicit object DoubleJsonFormat extends BigQueryJsonFormat[Double] { - def write(x: Double) = JsNumber(x) - def read(value: JsValue) = value match { + def write(x: Double): JsValue = JsNumber(x) + def read(value: JsValue): Double = value match { case JsNumber(x) => x.doubleValue case BigQueryNumber(x) => x.doubleValue case x => deserializationError("Expected Double as JsNumber or JsString, but got " + x) @@ -62,8 +62,8 @@ trait BigQueryBasicFormats { } implicit object ByteJsonFormat extends BigQueryJsonFormat[Byte] { - def write(x: Byte) = JsNumber(x) - def read(value: JsValue) = value match { + def write(x: Byte): JsNumber = JsNumber(x) + def read(value: JsValue): Byte = value match { case JsNumber(x) if x.isValidByte => x.byteValue case BigQueryNumber(x) if x.isValidByte => x.byteValue case x => deserializationError("Expected Byte as JsNumber or JsString, but got " + x) @@ -71,8 +71,8 @@ trait BigQueryBasicFormats { } implicit object ShortJsonFormat extends BigQueryJsonFormat[Short] { - def write(x: Short) = JsNumber(x) - def read(value: JsValue) = value match { + def write(x: Short): JsNumber = JsNumber(x) + def read(value: JsValue): Short = value match { case JsNumber(x) if x.isValidShort => x.shortValue case BigQueryNumber(x) if x.isValidShort => x.shortValue case x => deserializationError("Expected Short as JsNumber or JsString, but got " + x) @@ -80,11 +80,11 @@ trait BigQueryBasicFormats { } implicit object BigDecimalJsonFormat extends BigQueryJsonFormat[BigDecimal] { - def write(x: BigDecimal) = { + def write(x: BigDecimal): JsString = { require(x ne null) JsString(x.toString) } - def read(value: JsValue) = value match { + def read(value: JsValue): BigDecimal = value match { case JsNumber(x) => x case BigQueryNumber(x) => x case x => deserializationError("Expected BigDecimal as JsNumber or JsString, but got " + x) @@ -92,11 +92,11 @@ trait BigQueryBasicFormats { } implicit object BigIntJsonFormat extends BigQueryJsonFormat[BigInt] { - def write(x: BigInt) = { + def write(x: BigInt): JsString = { require(x ne null) JsString(x.toString) } - def read(value: JsValue) = value match { + def read(value: JsValue): BigInt = value match { case JsNumber(x) => x.toBigInt case BigQueryNumber(x) => x.toBigInt case x => deserializationError("Expected BigInt as JsNumber or JsString, but got " + x) @@ -104,13 +104,13 @@ trait BigQueryBasicFormats { } implicit object UnitJsonFormat extends BigQueryJsonFormat[Unit] { - def write(x: Unit) = JsNumber(1) + def write(x: Unit): JsNumber = JsNumber(1) def read(value: JsValue): Unit = {} } implicit object BooleanJsonFormat extends BigQueryJsonFormat[Boolean] { - def write(x: Boolean) = JsBoolean(x) - def read(value: JsValue) = value match { + def write(x: Boolean): JsBoolean = JsBoolean(x) + def read(value: JsValue): Boolean = value match { case 
JsTrue | JsString("true") => true case JsFalse | JsString("false") => false case x => deserializationError("Expected Boolean as JsBoolean or JsString, but got " + x) @@ -118,27 +118,27 @@ trait BigQueryBasicFormats { } implicit object CharJsonFormat extends BigQueryJsonFormat[Char] { - def write(x: Char) = JsString(String.valueOf(x)) - def read(value: JsValue) = value match { + def write(x: Char): JsString = JsString(String.valueOf(x)) + def read(value: JsValue): Char = value match { case JsString(x) if x.length == 1 => x.charAt(0) case x => deserializationError("Expected Char as single-character JsString, but got " + x) } } implicit object StringJsonFormat extends BigQueryJsonFormat[String] { - def write(x: String) = { + def write(x: String): JsString = { require(x ne null) JsString(x) } - def read(value: JsValue) = value match { + def read(value: JsValue): String = value match { case JsString(x) => x case x => deserializationError("Expected String as JsString, but got " + x) } } implicit object SymbolJsonFormat extends BigQueryJsonFormat[Symbol] { - def write(x: Symbol) = JsString(x.name) - def read(value: JsValue) = value match { + def write(x: Symbol): JsString = JsString(x.name) + def read(value: JsValue): Symbol = value match { case JsString(x) => Symbol(x) case x => deserializationError("Expected Symbol as JsString, but got " + x) } @@ -146,8 +146,8 @@ trait BigQueryBasicFormats { implicit object ByteStringJsonFormat extends BigQueryJsonFormat[ByteString] { import java.nio.charset.StandardCharsets.US_ASCII - def write(x: ByteString) = JsString(x.encodeBase64.decodeString(US_ASCII)) - def read(value: JsValue) = value match { + def write(x: ByteString): JsString = JsString(x.encodeBase64.decodeString(US_ASCII)) + def read(value: JsValue): ByteString = value match { case BigQueryBytes(x) => x case x => deserializationError("Expected ByteString as JsString, but got " + x) } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryCollectionFormats.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryCollectionFormats.scala index 847498fef..534a5d3ce 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryCollectionFormats.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryCollectionFormats.scala @@ -23,7 +23,7 @@ trait BigQueryCollectionFormats { * Supplies the BigQueryJsonFormat for Lists. 
*/ implicit def listFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[List[T]] = new BigQueryJsonFormat[List[T]] { - def write(list: List[T]) = JsArray(list.map(_.toJson).toVector) + def write(list: List[T]): JsArray = JsArray(list.map(_.toJson).toVector) def read(value: JsValue): List[T] = value match { case JsArray(elements) => elements.iterator.map(_.asJsObject.fields("v").convertTo[T]).toList case x => deserializationError("Expected List as JsArray, but got " + x) @@ -35,8 +35,8 @@ trait BigQueryCollectionFormats { */ implicit def arrayFormat[T: BigQueryJsonFormat: ClassTag]: BigQueryJsonFormat[Array[T]] = new BigQueryJsonFormat[Array[T]] { - def write(array: Array[T]) = JsArray(array.map(_.toJson).toVector) - def read(value: JsValue) = value match { + def write(array: Array[T]): JsArray = JsArray(array.map(_.toJson).toVector) + def read(value: JsValue): Array[T] = value match { case JsArray(elements) => elements.map(_.asJsObject.fields("v").convertTo[T]).toArray[T] case x => deserializationError("Expected Array as JsArray, but got " + x) } @@ -71,8 +71,8 @@ trait BigQueryCollectionFormats { */ def viaSeq[I <: Iterable[T], T: BigQueryJsonFormat](f: imm.Seq[T] => I): BigQueryJsonFormat[I] = new BigQueryJsonFormat[I] { - def write(iterable: I) = JsArray(iterable.map(_.toJson).toVector) - def read(value: JsValue) = value match { + def write(iterable: I): JsArray = JsArray(iterable.map(_.toJson).toVector) + def read(value: JsValue): I = value match { case JsArray(elements) => f(elements.map(_.asJsObject.fields("v").convertTo[T])) case x => deserializationError("Expected Collection as JsArray, but got " + x) } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryRestBasicFormats.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryRestBasicFormats.scala index 8c84f2997..c9998d646 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryRestBasicFormats.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryRestBasicFormats.scala @@ -36,8 +36,8 @@ trait BigQueryRestBasicFormats { implicit val SymbolJsonFormat: JsonFormat[Symbol] = DefaultJsonProtocol.SymbolJsonFormat implicit object BigQueryLongJsonFormat extends JsonFormat[Long] { - def write(x: Long) = JsNumber(x) - def read(value: JsValue) = value match { + def write(x: Long): JsNumber = JsNumber(x) + def read(value: JsValue): Long = value match { case JsNumber(x) if x.isValidLong => x.longValue case BigQueryNumber(x) if x.isValidLong => x.longValue case x => deserializationError("Expected Long as JsNumber or JsString, but got " + x) diff --git a/google-cloud-bigquery/src/test/scala/docs/scaladsl/BigQueryDoc.scala b/google-cloud-bigquery/src/test/scala/docs/scaladsl/BigQueryDoc.scala index 2c71e35e2..3e9e7550f 100644 --- a/google-cloud-bigquery/src/test/scala/docs/scaladsl/BigQueryDoc.scala +++ b/google-cloud-bigquery/src/test/scala/docs/scaladsl/BigQueryDoc.scala @@ -88,11 +88,9 @@ class BigQueryDoc { val people: List[Person] = ??? 
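The spray-json formats above now declare their write/read result types explicitly. The same pattern for a user-defined format, as a sketch only (CustomerId is hypothetical; BigQueryJsonFormat is assumed to live in the same scaladsl.spray package as the formats in this file):

import org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryJsonFormat
import spray.json.{ deserializationError, JsString, JsValue }

object CustomerIdProtocol {
  final case class CustomerId(value: String)

  implicit object CustomerIdFormat extends BigQueryJsonFormat[CustomerId] {
    // Explicit result types, mirroring the cleanup applied to the built-in formats.
    def write(id: CustomerId): JsString = JsString(id.value)
    def read(json: JsValue): CustomerId = json match {
      case JsString(s) => CustomerId(s)
      case x           => deserializationError("Expected CustomerId as JsString, but got " + x)
    }
  }
}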
// #job-status - def checkIfJobsDone(jobReferences: Seq[JobReference]): Future[Boolean] = { - for { - jobs <- Future.sequence(jobReferences.map(ref => BigQuery.job(ref.jobId.get))) - } yield jobs.forall(job => job.status.exists(_.state == JobState.Done)) - } + def checkIfJobsDone(jobReferences: Seq[JobReference]): Future[Boolean] = for { + jobs <- Future.sequence(jobReferences.map(ref => BigQuery.job(ref.jobId.get))) + } yield jobs.forall(job => job.status.exists(_.state == JobState.Done)) val isDone: Future[Boolean] = for { jobs <- Source(people).via(peopleLoadFlow).runWith(Sink.seq) diff --git a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/A.scala b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/A.scala index f79f96c26..8423b7a72 100644 --- a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/A.scala +++ b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/A.scala @@ -25,7 +25,8 @@ import com.fasterxml.jackson.databind.ser.std.ToStringSerializer import java.time.{ Instant, LocalDate, LocalDateTime, LocalTime } @JsonPropertyOrder(alphabetic = true) -case class A(integer: Int, long: Long, float: Float, double: Double, string: String, boolean: Boolean, record: B) { +final case class A(integer: Int, long: Long, float: Float, double: Double, string: String, boolean: Boolean, + record: B) { @JsonCreator def this(@JsonProperty("f") f: JsonNode) = @@ -51,7 +52,7 @@ case class A(integer: Int, long: Long, float: Float, double: Double, string: Str @JsonPropertyOrder(alphabetic = true) @JsonInclude(Include.NON_NULL) -case class B(nullable: Option[String], bytes: ByteString, repeated: Seq[C]) { +final case class B(nullable: Option[String], bytes: ByteString, repeated: Seq[C]) { def this(node: JsonNode) = this( Option(node.get("f").get(0).get("v").textValue()), @@ -64,7 +65,7 @@ case class B(nullable: Option[String], bytes: ByteString, repeated: Seq[C]) { } @JsonPropertyOrder(alphabetic = true) -case class C(numeric: BigDecimal, date: LocalDate, time: LocalTime, dateTime: LocalDateTime, timestamp: Instant) { +final case class C(numeric: BigDecimal, date: LocalDate, time: LocalTime, dateTime: LocalDateTime, timestamp: Instant) { def this(node: JsonNode) = this( diff --git a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/scaladsl/BigQueryEndToEndSpec.scala b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/scaladsl/BigQueryEndToEndSpec.scala index a18ad78e5..5ec5bc5be 100644 --- a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/scaladsl/BigQueryEndToEndSpec.scala +++ b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/scaladsl/BigQueryEndToEndSpec.scala @@ -51,7 +51,7 @@ class BigQueryEndToEndSpec } } - override def afterAll() = { + override def afterAll(): Unit = { system.terminate() if (hoverfly.getMode == HoverflyMode.CAPTURE) hoverfly.exportSimulation(new File("hoverfly/BigQueryEndToEndSpec.json").toPath) diff --git a/google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes b/google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes new file mode 100644 
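Aside on the checkIfJobsDone helper reshaped above: the same check can also be written with Future.traverse, which fuses the map and Future.sequence steps. Sketch only, reusing BigQuery, JobReference and JobState exactly as they are already imported and used in BigQueryDoc.scala:

import scala.concurrent.{ ExecutionContext, Future }

def checkIfJobsDoneAlt(jobReferences: Seq[JobReference])(implicit ec: ExecutionContext): Future[Boolean] =
  Future
    .traverse(jobReferences)(ref => BigQuery.job(ref.jobId.get))
    .map(_.forall(job => job.status.exists(_.state == JobState.Done)))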
index 000000000..63d2ec397 --- /dev/null +++ b/google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcBigQueryStorageReaderExt-more-specific-type.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.storage.scaladsl.GrpcBigQueryStorageReaderExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.bigquery.storage.scaladsl.GrpcBigQueryStorageReaderExt.lookup") diff --git a/google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcPublisherExt-more-specific-type.backwards.excludes b/google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcPublisherExt-more-specific-type.backwards.excludes new file mode 100644 index 000000000..aaf87c300 --- /dev/null +++ b/google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcPublisherExt-more-specific-type.backwards.excludes @@ -0,0 +1,4 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.javadsl.GrpcPublisherExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.javadsl.GrpcPublisherExt.lookup") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.scaladsl.GrpcPublisherExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.scaladsl.GrpcPublisherExt.lookup") diff --git a/google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcSubscriberExt-more-specific-type.backwards.excludes b/google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcSubscriberExt-more-specific-type.backwards.excludes new file mode 100644 index 000000000..d530646f2 --- /dev/null +++ b/google-cloud-pub-sub-grpc/src/main/mima-filters/1.1.x.backwards.excludes/GrpcSubscriberExt-more-specific-type.backwards.excludes @@ -0,0 +1,4 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.javadsl.GrpcSubscriberExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.javadsl.GrpcSubscriberExt.lookup") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.scaladsl.GrpcSubscriberExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.pubsub.grpc.scaladsl.GrpcSubscriberExt.lookup") diff --git a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcPublisher.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcPublisher.scala index 9fd70656a..ec0ac46df 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcPublisher.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcPublisher.scala @@ -78,7 +78,7 @@ final class GrpcPublisherExt private (sys: ExtendedActorSystem) extends Extensio } object GrpcPublisherExt extends ExtensionId[GrpcPublisherExt] with ExtensionIdProvider { - override def lookup = GrpcPublisherExt + override def lookup: GrpcPublisherExt.type = 
GrpcPublisherExt override def createExtension(system: ExtendedActorSystem) = new GrpcPublisherExt(system) /** diff --git a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcSubscriber.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcSubscriber.scala index de2e738ad..e5fc28532 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcSubscriber.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcSubscriber.scala @@ -79,7 +79,7 @@ final class GrpcSubscriberExt private (sys: ExtendedActorSystem) extends Extensi } object GrpcSubscriberExt extends ExtensionId[GrpcSubscriberExt] with ExtensionIdProvider { - override def lookup = GrpcSubscriberExt + override def lookup: GrpcSubscriberExt.type = GrpcSubscriberExt override def createExtension(system: ExtendedActorSystem) = new GrpcSubscriberExt(system) /** diff --git a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GooglePubSub.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GooglePubSub.scala index 128215c15..030826ec0 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GooglePubSub.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GooglePubSub.scala @@ -94,7 +94,7 @@ object GooglePubSub { Source .tick(0.seconds, pollInterval, request) .mapMaterializedValue(cancellable.success) - .mapAsync(1)(client.pull(_)) + .mapAsync(1)(client.pull) .mapConcat(_.receivedMessages.toVector) .mapMaterializedValue(_ => cancellable.future) } @@ -121,7 +121,7 @@ object GooglePubSub { * * @param parallelism controls how many acknowledgements can be in-flight at any given time */ - def acknowledge(parallelism: Int): Sink[AcknowledgeRequest, Future[Done]] = { + def acknowledge(parallelism: Int): Sink[AcknowledgeRequest, Future[Done]] = Sink .fromMaterializer { (mat, attr) => Flow[AcknowledgeRequest] @@ -129,7 +129,6 @@ object GooglePubSub { .toMat(Sink.ignore)(Keep.right) } .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) - } private def publisher(mat: Materializer, attr: Attributes) = attr diff --git a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcPublisher.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcPublisher.scala index 286c6d78e..ae9f53f1d 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcPublisher.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcPublisher.scala @@ -69,7 +69,7 @@ final class GrpcPublisherExt private (sys: ExtendedActorSystem) extends Extensio } object GrpcPublisherExt extends ExtensionId[GrpcPublisherExt] with ExtensionIdProvider { - override def lookup = GrpcPublisherExt + override def lookup: GrpcPublisherExt.type = GrpcPublisherExt override def createExtension(system: ExtendedActorSystem) = new GrpcPublisherExt(system) /** diff --git 
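The lookup overrides above all follow Pekko's actor-extension pattern; narrowing lookup to the singleton type is exactly what the new MiMa filter files account for. A minimal sketch of the pattern (MyExt is made up, not part of this patch):

import org.apache.pekko.actor.{ ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }

final class MyExt private (system: ExtendedActorSystem) extends Extension

object MyExt extends ExtensionId[MyExt] with ExtensionIdProvider {
  // Returning the singleton type instead of the inferred ExtensionId is the change applied above.
  override def lookup: MyExt.type = MyExt
  override def createExtension(system: ExtendedActorSystem): MyExt = new MyExt(system)
}

// Usage: MyExt(actorSystem) returns the per-ActorSystem instance.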
a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcSubscriber.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcSubscriber.scala index 0d70d861d..af7fe8540 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcSubscriber.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcSubscriber.scala @@ -69,7 +69,7 @@ final class GrpcSubscriberExt private (sys: ExtendedActorSystem) extends Extensi } object GrpcSubscriberExt extends ExtensionId[GrpcSubscriberExt] with ExtensionIdProvider { - override def lookup = GrpcSubscriberExt + override def lookup: GrpcSubscriberExt.type = GrpcSubscriberExt override def createExtension(system: ExtendedActorSystem) = new GrpcSubscriberExt(system) /** diff --git a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/ExampleApp.scala b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/ExampleApp.scala index d76e43398..826b2797a 100644 --- a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/ExampleApp.scala +++ b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/ExampleApp.scala @@ -85,7 +85,7 @@ object ExampleApp { .map(publish(projectId, topic)(_)) .via(GooglePubSub.publish(parallelism = 1)) .to(Sink.ignore) - .mapMaterializedValue(Future.successful(_)) + .mapMaterializedValue(Future.successful) .run() } diff --git a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala index 3e088cecc..4dd59d8e1 100644 --- a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala +++ b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala @@ -281,7 +281,7 @@ class IntegrationSpec } } - override def afterAll() = + override def afterAll(): Unit = system.terminate() } diff --git a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala index 90dd33581..b97b894be 100644 --- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala +++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala @@ -42,15 +42,15 @@ import scala.util.Try */ @InternalApi private[pubsub] object PubSubApi extends PubSubApi { - val DefaultPubSubGoogleApisHost = "pubsub.googleapis.com" - val DefaultPubSubGoogleApisPort = 443 + private val DefaultPubSubGoogleApisHost = "pubsub.googleapis.com" + private val DefaultPubSubGoogleApisPort = 443 val PubSubEmulatorHostVarName = "PUBSUB_EMULATOR_HOST" - val PubSubEmulatorPortVarName = "PUBSUB_EMULATOR_PORT" + private val PubSubEmulatorPortVarName = "PUBSUB_EMULATOR_PORT" val PubSubGoogleApisHost: String = PubSubEmulatorHost.getOrElse(DefaultPubSubGoogleApisHost) val PubSubGoogleApisPort: Int = PubSubEmulatorPort.getOrElse(DefaultPubSubGoogleApisPort) - override def isEmulated = PubSubEmulatorHost.nonEmpty + override def isEmulated: Boolean = PubSubEmulatorHost.nonEmpty private[pubsub] lazy val PubSubEmulatorHost: Option[String] = sys.props .get(PubSubEmulatorHostVarName) diff --git 
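The client.pull(_) and Future.successful(_) wrappers removed above are redundant: a method value already converts to the expected function type. A small illustration with a hypothetical fetch function:

import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Source
import scala.concurrent.Future

def fetch(id: Int): Future[String] = Future.successful(s"item-$id")

// Passing the method value is equivalent to .mapAsync(1)(id => fetch(id)).
val items: Source[String, NotUsed] = Source(1 to 10).mapAsync(1)(fetch)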
a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/model.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/model.scala index 0bed06463..1a43f3242 100644 --- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/model.scala +++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/model.scala @@ -136,13 +136,15 @@ final class PublishMessage private (val data: String, } object PublishMessage { - def apply(data: String, attributes: immutable.Map[String, String]) = new PublishMessage(data, Some(attributes), None) - def apply(data: String, attributes: Option[immutable.Map[String, String]], orderingKey: Option[String]) = + def apply(data: String, attributes: immutable.Map[String, String]): PublishMessage = + new PublishMessage(data, Some(attributes), None) + def apply( + data: String, attributes: Option[immutable.Map[String, String]], orderingKey: Option[String]): PublishMessage = new PublishMessage(data, attributes, orderingKey) - def apply(data: String, attributes: Option[immutable.Map[String, String]]) = + def apply(data: String, attributes: Option[immutable.Map[String, String]]): PublishMessage = new PublishMessage(data, attributes, None) - def apply(data: String) = new PublishMessage(data, None, None) - def create(data: String) = new PublishMessage(data, None, None) + def apply(data: String): PublishMessage = new PublishMessage(data, None, None) + def create(data: String): PublishMessage = new PublishMessage(data, None, None) /** * Java API @@ -204,14 +206,14 @@ object PubSubMessage { def apply(data: Option[String], attributes: Option[immutable.Map[String, String]], messageId: String, - publishTime: Instant) = + publishTime: Instant): PubSubMessage = new PubSubMessage(data, attributes, messageId, publishTime, None) def apply(data: Option[String] = None, attributes: Option[immutable.Map[String, String]] = None, messageId: String, publishTime: Instant, - orderingKey: Option[String] = None) = + orderingKey: Option[String] = None): PubSubMessage = new PubSubMessage(data, attributes, messageId, publishTime, orderingKey) /** @@ -276,7 +278,7 @@ final class ReceivedMessage private (val ackId: String, val message: PubSubMessa override def hashCode: Int = java.util.Objects.hash(ackId, message) - override def toString: String = "ReceivedMessage(ackId=" + ackId.toString + ",message=" + message.toString + ")" + override def toString: String = "ReceivedMessage(ackId=" + ackId + ",message=" + message.toString + ")" } object ReceivedMessage { @@ -348,7 +350,7 @@ private final class PullResponse private[pubsub] (val receivedMessages: Option[i object PullResponse { - @InternalApi private[pubsub] def apply(receivedMessages: Option[immutable.Seq[ReceivedMessage]]) = + @InternalApi private[pubsub] def apply(receivedMessages: Option[immutable.Seq[ReceivedMessage]]): PullResponse = new PullResponse(receivedMessages) } diff --git a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/scaladsl/GooglePubSub.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/scaladsl/GooglePubSub.scala index 9cee93b6e..1d27ebe5a 100644 --- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/scaladsl/GooglePubSub.scala +++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/scaladsl/GooglePubSub.scala @@ 
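The PublishMessage factories typed above can be exercised as follows; the payload is a placeholder (the Pub/Sub REST API expects the data field base64-encoded):

import org.apache.pekko.stream.connectors.googlecloud.pubsub.PublishMessage

// "aGVsbG8=" is base64 for "hello"; the attribute map is optional metadata.
val message: PublishMessage = PublishMessage("aGVsbG8=", Map("origin" -> "docs"))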
-129,19 +129,18 @@ protected[pubsub] trait GooglePubSub { /** * Creates a source pulling messages from a subscription. */ - def subscribe(subscription: String, config: PubSubConfig): Source[ReceivedMessage, Cancellable] = { + def subscribe(subscription: String, config: PubSubConfig): Source[ReceivedMessage, Cancellable] = Source .tick(0.seconds, 1.second, Done) .via(subscribeFlow(subscription, config)) - } /** * Creates a flow pulling messages from a subscription. */ - def subscribeFlow(subscription: String, config: PubSubConfig): Flow[Done, ReceivedMessage, Future[NotUsed]] = { + def subscribeFlow(subscription: String, config: PubSubConfig): Flow[Done, ReceivedMessage, Future[NotUsed]] = flow(config)(httpApi.pull(subscription, config.pullReturnImmediately, config.pullMaxMessagesPerInternalBatch)) .mapConcat(_.receivedMessages.getOrElse(Seq.empty[ReceivedMessage]).toIndexedSeq) - }.mapMaterializedValue(_ => Future.successful(NotUsed)) + .mapMaterializedValue(_ => Future.successful(NotUsed)) /** * Creates a flow for acknowledging messages on a subscription. diff --git a/google-cloud-pub-sub/src/test/java/docs/javadsl/ExampleUsageJava.java b/google-cloud-pub-sub/src/test/java/docs/javadsl/ExampleUsageJava.java index 437fadc92..26d8792d1 100644 --- a/google-cloud-pub-sub/src/test/java/docs/javadsl/ExampleUsageJava.java +++ b/google-cloud-pub-sub/src/test/java/docs/javadsl/ExampleUsageJava.java @@ -79,7 +79,7 @@ private static void example() throws NoSuchAlgorithmException, InvalidKeySpecExc Source messageSource = Source.single(publishMessage); messageSource .groupedWithin(1000, Duration.ofMinutes(1)) - .map(messages -> PublishRequest.create(messages)) + .map(PublishRequest::create) .via(publishFlow) .runWith(Sink.ignore(), system); // #publish-fast @@ -108,30 +108,25 @@ private static void example() throws NoSuchAlgorithmException, InvalidKeySpecExc Sink> ackSink = GooglePubSub.acknowledge(subscription, config); - subscriptionSource - .map( - message -> { - // do something fun - return message.ackId(); - }) + // do something fun + subscriptionSource + .map(ReceivedMessage::ackId) .groupedWithin(1000, Duration.ofMinutes(1)) - .map(acks -> AcknowledgeRequest.create(acks)) + .map(AcknowledgeRequest::create) .to(ackSink); // #subscribe // #subscribe-source-control - Source.tick(Duration.ofSeconds(0), Duration.ofSeconds(10), Done.getInstance()) + // do something fun + Source.tick(Duration.ofSeconds(0), Duration.ofSeconds(10), Done.getInstance()) .via( RestartFlow.withBackoff( RestartSettings.create(Duration.ofSeconds(1), Duration.ofSeconds(30), 0.2), () -> GooglePubSub.subscribeFlow(subscription, config))) - .map( - message -> { - // do something fun - return message.ackId(); - }) + // do something fun + .map(ReceivedMessage::ackId) .groupedWithin(1000, Duration.ofMinutes(1)) - .map(acks -> AcknowledgeRequest.create(acks)) + .map(AcknowledgeRequest::create) .to(ackSink); // #subscribe-source-control @@ -142,9 +137,9 @@ private static void example() throws NoSuchAlgorithmException, InvalidKeySpecExc Sink batchAckSink = Flow.of(ReceivedMessage.class) - .map(t -> t.ackId()) + .map(ReceivedMessage::ackId) .groupedWithin(1000, Duration.ofMinutes(1)) - .map(ids -> AcknowledgeRequest.create(ids)) + .map(AcknowledgeRequest::create) .to(ackSink); subscriptionSource.alsoTo(batchAckSink).to(processSink); diff --git a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala 
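A hedged usage sketch of the subscribe source simplified above; the subscription name is a placeholder and the PubSubConfig is assumed to be built elsewhere:

import org.apache.pekko.actor.Cancellable
import org.apache.pekko.stream.connectors.googlecloud.pubsub.{ PubSubConfig, ReceivedMessage }
import org.apache.pekko.stream.connectors.googlecloud.pubsub.scaladsl.GooglePubSub
import org.apache.pekko.stream.scaladsl.Source

val config: PubSubConfig = ??? // constructed elsewhere from the connector's settings

// The materialized Cancellable stops the underlying 1-second polling tick.
val messages: Source[ReceivedMessage, Cancellable] =
  GooglePubSub.subscribe("my-subscription", config)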
b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala index 03c179ca5..2dc9d1160 100644 --- a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala @@ -47,9 +47,8 @@ class GooglePubSubSpec implicit val system: ActorSystem = ActorSystem() - override protected def afterAll(): Unit = { + override protected def afterAll(): Unit = TestKit.shutdownActorSystem(system) - } private trait Fixtures { lazy val mockHttpApi = mock[PubSubApi] diff --git a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala index f443719d1..c245192e4 100644 --- a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala @@ -124,7 +124,7 @@ class PubSubApiSpec extends AnyFlatSpec with BeforeAndAfterAll with ScalaFutures val result = Source.single((publishRequest, ())).via(flow).toMat(Sink.head)(Keep.right).run() result.futureValue._1.messageIds shouldBe Seq("1") - result.futureValue._2 shouldBe (()) + result.futureValue._2 shouldBe () } it should "publish with ordering key" in { @@ -155,7 +155,7 @@ class PubSubApiSpec extends AnyFlatSpec with BeforeAndAfterAll with ScalaFutures val result = Source.single((publishRequest, ())).via(flow).toMat(Sink.head)(Keep.right).run() result.futureValue._1.messageIds shouldBe Seq("1") - result.futureValue._2 shouldBe (()) + result.futureValue._2 shouldBe () } it should "publish to overridden host" in { @@ -190,7 +190,7 @@ class PubSubApiSpec extends AnyFlatSpec with BeforeAndAfterAll with ScalaFutures val result = Source.single((publishRequest, ())).via(flow).toMat(Sink.head)(Keep.right).run() result.futureValue._1.messageIds shouldBe Seq("1") - result.futureValue._2 shouldBe (()) + result.futureValue._2 shouldBe () } it should "publish without Authorization header to emulator" in { @@ -219,7 +219,7 @@ class PubSubApiSpec extends AnyFlatSpec with BeforeAndAfterAll with ScalaFutures val result = Source.single((publishRequest, ())).via(flow).toMat(Sink.last)(Keep.right).run() result.futureValue._1.messageIds shouldBe Seq("1") - result.futureValue._2 shouldBe (()) + result.futureValue._2 shouldBe () } it should "Pull with results" in { diff --git a/google-cloud-storage/src/main/mima-filters/1.1.x.backwards.excludes/GCSExt-more-specific-type.backwards.excludes b/google-cloud-storage/src/main/mima-filters/1.1.x.backwards.excludes/GCSExt-more-specific-type.backwards.excludes new file mode 100644 index 000000000..9cf199f89 --- /dev/null +++ b/google-cloud-storage/src/main/mima-filters/1.1.x.backwards.excludes/GCSExt-more-specific-type.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.storage.GCSExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.storage.GCSExt.lookup") diff --git a/google-cloud-storage/src/main/mima-filters/1.1.x.backwards.excludes/GCSStorageExt-more-specific-type.backwards.excludes 
b/google-cloud-storage/src/main/mima-filters/1.1.x.backwards.excludes/GCSStorageExt-more-specific-type.backwards.excludes new file mode 100644 index 000000000..9015c121d --- /dev/null +++ b/google-cloud-storage/src/main/mima-filters/1.1.x.backwards.excludes/GCSStorageExt-more-specific-type.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.googlecloud.storage.GCStorageExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.googlecloud.storage.GCStorageExt.lookup") diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/Bucket.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/Bucket.scala index b62b2a1d4..f06d2dc4f 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/Bucket.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/Bucket.scala @@ -70,7 +70,7 @@ final class Bucket private ( selfLink = selfLink, etag = etag) - override def toString = + override def toString: String = "BucketInfo(" + s"name=$name," + s"location=$location," + diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/FailedUpload.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/FailedUpload.scala index 3d57f0ac1..a28626c4a 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/FailedUpload.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/FailedUpload.scala @@ -25,8 +25,8 @@ final class FailedUpload private ( object FailedUpload { - def apply(reasons: Seq[Throwable]) = new FailedUpload(reasons) + def apply(reasons: Seq[Throwable]): FailedUpload = new FailedUpload(reasons) /** Java API */ - def create(reasons: java.util.List[Throwable]) = FailedUpload(reasons.asScala.toList) + def create(reasons: java.util.List[Throwable]): FailedUpload = FailedUpload(reasons.asScala.toList) } diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSAttributes.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSAttributes.scala index 90048e90d..e1e400fff 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSAttributes.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSAttributes.scala @@ -34,13 +34,13 @@ object GCSAttributes { final class GCSSettingsPath private (val path: String) extends Attribute object GCSSettingsPath { - val Default = GCSSettingsPath(GCSSettings.ConfigPath) + val Default: GCSSettingsPath = GCSSettingsPath(GCSSettings.ConfigPath) - def apply(path: String) = new GCSSettingsPath(path) + def apply(path: String): GCSSettingsPath = new GCSSettingsPath(path) } final class GCSSettingsValue private (val settings: GCSSettings) extends Attribute object GCSSettingsValue { - def apply(settings: GCSSettings) = new GCSSettingsValue(settings) + def apply(settings: GCSSettings): GCSSettingsValue = new GCSSettingsValue(settings) } diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExt.scala 
b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExt.scala index 91dc285f2..927d65d66 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExt.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExt.scala @@ -26,7 +26,7 @@ final class GCSExt private (sys: ExtendedActorSystem) extends Extension { } object GCSExt extends ExtensionId[GCSExt] with ExtensionIdProvider { - override def lookup = GCSExt + override def lookup: GCSExt.type = GCSExt override def createExtension(system: ExtendedActorSystem) = new GCSExt(system) /** diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageAttributes.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageAttributes.scala index 4602f0854..8388c4d19 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageAttributes.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageAttributes.scala @@ -49,9 +49,9 @@ final class GCStorageSettingsPath private (val path: String) extends Attribute @deprecated("Use org.apache.pekko.stream.connectors.google.GoogleAttributes", "Alpakka 3.0.0") @Deprecated object GCStorageSettingsPath { - val Default = GCStorageSettingsPath(GCStorageSettings.ConfigPath) + val Default: GCStorageSettingsPath = GCStorageSettingsPath(GCStorageSettings.ConfigPath) - def apply(path: String) = new GCStorageSettingsPath(path) + def apply(path: String): GCStorageSettingsPath = new GCStorageSettingsPath(path) } /** @@ -67,5 +67,5 @@ final class GCStorageSettingsValue private (val settings: GCStorageSettings) ext @deprecated("Use org.apache.pekko.stream.connectors.google.GoogleAttributes", "Alpakka 3.0.0") @Deprecated object GCStorageSettingsValue { - def apply(settings: GCStorageSettings) = new GCStorageSettingsValue(settings) + def apply(settings: GCStorageSettings): GCStorageSettingsValue = new GCStorageSettingsValue(settings) } diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExt.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExt.scala index cd366a64d..e2a01666c 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExt.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExt.scala @@ -34,7 +34,7 @@ final class GCStorageExt private (sys: ExtendedActorSystem) extends Extension { @deprecated("Use org.apache.pekko.stream.connectors.google.GoogleSettings", "Alpakka 3.0.0") @Deprecated object GCStorageExt extends ExtensionId[GCStorageExt] with ExtensionIdProvider { - override def lookup = GCStorageExt + override def lookup: GCStorageExt.type = GCStorageExt override def createExtension(system: ExtendedActorSystem) = new GCStorageExt(system) /** diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageSettings.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageSettings.scala index 5f15718e6..b1ff486f8 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageSettings.scala +++ 
b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageSettings.scala @@ -76,7 +76,7 @@ final class GCStorageSettings private ( tokenUrl = tokenUrl, tokenScope = tokenScope) - override def toString = + override def toString: String = "GCStorageSettings(" + s"projectId=$projectId," + s"clientEmail=$clientEmail," + @@ -192,12 +192,12 @@ object GCStorageSettings { def apply(system: ActorSystem): GCStorageSettings = apply(system.settings.config.getConfig(ConfigPath)) /** - * Java API: Creates [[S3Settings]] from the [[com.typesafe.config.Config Config]] attached to an actor system. + * Java API: Creates [[GCStorageSettings]] from the [[com.typesafe.config.Config Config]] attached to an actor system. */ def create(system: ClassicActorSystemProvider): GCStorageSettings = apply(system.classicSystem) /** - * Java API: Creates [[S3Settings]] from the [[com.typesafe.config.Config Config]] attached to an [[pekko.actor.ActorSystem]]. + * Java API: Creates [[GCStorageSettings]] from the [[com.typesafe.config.Config Config]] attached to an [[pekko.actor.ActorSystem]]. */ def create(system: ActorSystem): GCStorageSettings = apply(system) } diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/StorageObject.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/StorageObject.scala index 985603da0..d5777eda5 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/StorageObject.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/StorageObject.scala @@ -97,7 +97,7 @@ final class StorageObject private ( val acl: Option[List[ObjectAccessControls]]) { /** Java API */ - def getContentType: pekko.http.javadsl.model.ContentType = contentType.asInstanceOf[ContentType] + def getContentType: pekko.http.javadsl.model.ContentType = contentType def getTimeDeleted: Optional[OffsetDateTime] = timeDeleted.toJava def getContentDisposition: Optional[String] = contentDisposition.toJava def getContentEncoding: Optional[String] = contentEncoding.toJava @@ -162,7 +162,7 @@ final class StorageObject private ( name: String = name, bucket: String = bucket, generation: Long = generation, - contentType: ContentType = maybeContentType.getOrElse(null), + contentType: ContentType = maybeContentType.orNull, maybeContentType: Option[ContentType] = maybeContentType, size: Long = size, etag: String = etag, @@ -230,7 +230,7 @@ final class StorageObject private ( owner = owner, acl = acl) - override def toString = + override def toString: String = "StorageObject(" + s"kind=$kind," + s"id=$id," + diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala index 150305568..255965ff8 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala @@ -180,7 +180,7 @@ import scala.concurrent.{ ExecutionContext, Future } sealed trait RewriteState case object Starting extends RewriteState - case class Running(rewriteToken: String) extends RewriteState + final case class Running(rewriteToken: String) extends RewriteState case object 
Finished extends RewriteState val sourcePath = getObjectPath(sourceBucket, sourceObjectName) diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/javadsl/GCStorage.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/javadsl/GCStorage.scala index b085ae64f..a6c1e805e 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/javadsl/GCStorage.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/javadsl/GCStorage.scala @@ -408,7 +408,5 @@ object GCStorage { def deleteObjectsByPrefix(bucket: String, prefix: String): Source[java.lang.Boolean, NotUsed] = GCStorageStream.deleteObjectsByPrefixSource(bucket, Option(prefix)).map(boolean2Boolean).asJava - private def func[T, R](f: T => R) = new pekko.japi.function.Function[T, R] { - override def apply(param: T): R = f(param) - } + private def func[T, R](f: T => R): pekko.japi.function.Function[T, R] = (param: T) => f(param) } diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/settings.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/settings.scala index 96ecc1b16..8442c37ce 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/settings.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/settings.scala @@ -20,7 +20,7 @@ import com.typesafe.config.Config import java.util.Objects object GCSSettings { - val ConfigPath = "pekko.connectors.google.cloud-storage" + val ConfigPath: String = "pekko.connectors.google.cloud-storage" /** * Reads from the given config. 
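
[Editor's note] The hunks above give the Pekko extension objects (GCSExt, GCStorageExt) an explicit singleton result type on `lookup`, and the new MiMa filter files exclude the resulting IncompatibleResultTypeProblem / IncompatibleSignatureProblem. A minimal sketch of that pattern follows, using a hypothetical MyExt extension that is not part of this patch; it only illustrates why the annotation is paired with MiMa excludes.

import org.apache.pekko.actor.{ ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }

// Hypothetical extension, for illustration only.
final class MyExt private (system: ExtendedActorSystem) extends Extension

object MyExt extends ExtensionId[MyExt] with ExtensionIdProvider {
  // Annotating lookup with the singleton type MyExt.type (instead of relying on the
  // previously inferred type) changes the compiled method signature, which is why the
  // 1.1.x backwards-compatibility excludes above are added alongside this cleanup.
  override def lookup: MyExt.type = MyExt
  override def createExtension(system: ExtendedActorSystem): MyExt = new MyExt(system)
}
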
diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala index 35f18241c..1aee28ffc 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala @@ -100,7 +100,7 @@ abstract class GCStorageWiremockBase(_system: ActorSystem, _wireMockServer: Hove } } - def mockTokenApi: SimulationSource = { + def mockTokenApi: SimulationSource = dsl( service("oauth2.googleapis.com") .post("/token") @@ -111,7 +111,6 @@ abstract class GCStorageWiremockBase(_system: ActorSystem, _wireMockServer: Hove .header("Content-Type", "application/json") .body( s"""{"access_token": "${TestCredentials.accessToken}", "token_type": "String", "expires_in": 3600}"""))) - } def storageService = service("storage.googleapis.com") diff --git a/google-common/src/main/mima-filters/1.1.x.backwards.excludes/XUploadContentType-more-specific-type.backwards.excludes b/google-common/src/main/mima-filters/1.1.x.backwards.excludes/XUploadContentType-more-specific-type.backwards.excludes new file mode 100644 index 000000000..38b485971 --- /dev/null +++ b/google-common/src/main/mima-filters/1.1.x.backwards.excludes/XUploadContentType-more-specific-type.backwards.excludes @@ -0,0 +1 @@ +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.google.scaladsl.X-Upload-Content-Type.companion") diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleExt.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleExt.scala index bb6680a89..8b48929dc 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleExt.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleExt.scala @@ -47,8 +47,8 @@ private[google] object GoogleExt extends ExtensionId[GoogleExt] with ExtensionId def apply()(implicit system: ActorSystem): GoogleExt = super.apply(system) - override def lookup = GoogleExt - override def createExtension(system: ExtendedActorSystem) = new GoogleExt(system) + override def lookup: GoogleExt.type = GoogleExt + override def createExtension(system: ExtendedActorSystem): GoogleExt = new GoogleExt(system) /** * Java API. diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala index ae402473f..016a49e27 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala @@ -34,7 +34,7 @@ import java.util.Optional import scala.concurrent.duration._ object GoogleSettings { - val ConfigPath = "pekko.connectors.google" + val ConfigPath: String = "pekko.connectors.google" /** * Reads from the given config. @@ -50,7 +50,7 @@ object GoogleSettings { /** * Java API: Reads from the given config. 
*/ - def create(c: Config, system: ClassicActorSystemProvider) = + def create(c: Config, system: ClassicActorSystemProvider): GoogleSettings = apply(c)(system) /** @@ -85,7 +85,7 @@ object GoogleSettings { /** * Java API */ - def create(projectId: String, credentials: Credentials, requestSettings: RequestSettings) = + def create(projectId: String, credentials: Credentials, requestSettings: RequestSettings): GoogleSettings = GoogleSettings(projectId, credentials, requestSettings) } @@ -94,15 +94,15 @@ object GoogleSettings { final case class GoogleSettings(projectId: String, credentials: Credentials, requestSettings: RequestSettings) { - def getProjectId = projectId - def getCredentials = credentials - def getRequestSettings = requestSettings + def getProjectId: String = projectId + def getCredentials: Credentials = credentials + def getRequestSettings: RequestSettings = requestSettings - def withProjectId(projectId: String) = + def withProjectId(projectId: String): GoogleSettings = copy(projectId = projectId) - def withCredentials(credentials: Credentials) = + def withCredentials(credentials: Credentials): GoogleSettings = copy(credentials = credentials) - def withRequestSettings(requestSettings: RequestSettings) = + def withRequestSettings(requestSettings: RequestSettings): GoogleSettings = copy(requestSettings = requestSettings) } @@ -124,14 +124,14 @@ object RequestSettings { maybeForwardProxy) } - def create(config: Config)(implicit system: ClassicActorSystemProvider) = apply(config) + def create(config: Config)(implicit system: ClassicActorSystemProvider): RequestSettings = apply(config) def create(userIp: Optional[String], quotaUser: Optional[String], prettyPrint: Boolean, chunkSize: Int, retrySettings: RetrySettings, - forwardProxy: Optional[ForwardProxy]) = + forwardProxy: Optional[ForwardProxy]): RequestSettings = apply(userIp.toScala, quotaUser.toScala, prettyPrint, chunkSize, retrySettings, forwardProxy.toScala) } @@ -148,30 +148,30 @@ final case class RequestSettings( (uploadChunkSize >= (256 * 1024)) & (uploadChunkSize % (256 * 1024) == 0), "Chunk size must be a multiple of 256 KiB") - def getUserIp = userIp.toJava - def getQuotaUser = quotaUser.toJava - def getPrettyPrint = prettyPrint - def getUploadChunkSize = uploadChunkSize - def getRetrySettings = retrySettings - def getForwardProxy = forwardProxy + def getUserIp: Optional[String] = userIp.toJava + def getQuotaUser: Optional[String] = quotaUser.toJava + def getPrettyPrint: Boolean = prettyPrint + def getUploadChunkSize: Int = uploadChunkSize + def getRetrySettings: RetrySettings = retrySettings + def getForwardProxy: Option[ForwardProxy] = forwardProxy - def withUserIp(userIp: Option[String]) = + def withUserIp(userIp: Option[String]): RequestSettings = copy(userIp = userIp) - def withUserIp(userIp: Optional[String]) = + def withUserIp(userIp: Optional[String]): RequestSettings = copy(userIp = userIp.toScala) - def withQuotaUser(quotaUser: Option[String]) = + def withQuotaUser(quotaUser: Option[String]): RequestSettings = copy(quotaUser = quotaUser) - def withQuotaUser(quotaUser: Optional[String]) = + def withQuotaUser(quotaUser: Optional[String]): RequestSettings = copy(quotaUser = quotaUser.toScala) - def withPrettyPrint(prettyPrint: Boolean) = + def withPrettyPrint(prettyPrint: Boolean): RequestSettings = copy(prettyPrint = prettyPrint) - def withUploadChunkSize(uploadChunkSize: Int) = + def withUploadChunkSize(uploadChunkSize: Int): RequestSettings = copy(uploadChunkSize = uploadChunkSize) - def 
withRetrySettings(retrySettings: RetrySettings) = + def withRetrySettings(retrySettings: RetrySettings): RequestSettings = copy(retrySettings = retrySettings) - def withForwardProxy(forwardProxy: Option[ForwardProxy]) = + def withForwardProxy(forwardProxy: Option[ForwardProxy]): RequestSettings = copy(forwardProxy = forwardProxy) - def withForwardProxy(forwardProxy: Optional[ForwardProxy]) = + def withForwardProxy(forwardProxy: Optional[ForwardProxy]): RequestSettings = copy(forwardProxy = forwardProxy.toScala) // Cache query string @@ -183,17 +183,17 @@ final case class RequestSettings( object RetrySettings { - def apply(config: Config): RetrySettings = { + def apply(config: Config): RetrySettings = RetrySettings( config.getInt("max-retries"), config.getDuration("min-backoff").asScala, config.getDuration("max-backoff").asScala, config.getDouble("random-factor")) - } - def create(config: Config) = apply(config) + def create(config: Config): RetrySettings = apply(config) - def create(maxRetries: Int, minBackoff: time.Duration, maxBackoff: time.Duration, randomFactor: Double) = + def create( + maxRetries: Int, minBackoff: time.Duration, maxBackoff: time.Duration, randomFactor: Double): RetrySettings = apply( maxRetries, minBackoff.asScala, @@ -205,22 +205,22 @@ final case class RetrySettings @InternalApi private (maxRetries: Int, minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double) { - def getMaxRetries = maxRetries - def getMinBackoff = minBackoff.asJava - def getMaxBackoff = maxBackoff.asJava - def getRandomFactor = randomFactor + def getMaxRetries: Int = maxRetries + def getMinBackoff: time.Duration = minBackoff.asJava + def getMaxBackoff: time.Duration = maxBackoff.asJava + def getRandomFactor: Double = randomFactor - def withMaxRetries(maxRetries: Int) = + def withMaxRetries(maxRetries: Int): RetrySettings = copy(maxRetries = maxRetries) - def withMinBackoff(minBackoff: FiniteDuration) = + def withMinBackoff(minBackoff: FiniteDuration): RetrySettings = copy(minBackoff = minBackoff) - def withMinBackoff(minBackoff: time.Duration) = + def withMinBackoff(minBackoff: time.Duration): RetrySettings = copy(minBackoff = minBackoff.asScala) - def withMaxBackoff(maxBackoff: FiniteDuration) = + def withMaxBackoff(maxBackoff: FiniteDuration): RetrySettings = copy(maxBackoff = maxBackoff) - def withMaxBackoff(maxBackoff: time.Duration) = + def withMaxBackoff(maxBackoff: time.Duration): RetrySettings = copy(maxBackoff = maxBackoff.asScala) - def withRandomFactor(randomFactor: Double) = + def withRandomFactor(randomFactor: Double): RetrySettings = copy(randomFactor = randomFactor) } @@ -245,28 +245,28 @@ object ForwardProxy { ForwardProxy(scheme, c.getString("host"), c.getInt("port"), maybeCredentials, maybeTrustPem) } - def create(c: Config, system: ClassicActorSystemProvider) = + def create(c: Config, system: ClassicActorSystemProvider): ForwardProxy = apply(c)(system) def apply(scheme: String, host: String, port: Int, credentials: Option[BasicHttpCredentials], - trustPem: Option[String])(implicit system: ClassicActorSystemProvider): ForwardProxy = { + trustPem: Option[String])(implicit system: ClassicActorSystemProvider): ForwardProxy = ForwardProxy( trustPem.fold(Http(system.classicSystem).defaultClientHttpsContext)(ForwardProxyHttpsContext(_)), ForwardProxyPoolSettings(scheme, host, port, credentials)(system.classicSystem)) - } def create(scheme: String, host: String, port: Int, credentials: Optional[jm.headers.BasicHttpCredentials], trustPem: Optional[String], - system: 
ClassicActorSystemProvider) = + system: ClassicActorSystemProvider): ForwardProxy = apply(scheme, host, port, credentials.toScala.map(_.asInstanceOf[BasicHttpCredentials]), trustPem.toScala)(system) - def create(connectionContext: jh.HttpConnectionContext, poolSettings: jh.settings.ConnectionPoolSettings) = + def create( + connectionContext: jh.HttpConnectionContext, poolSettings: jh.settings.ConnectionPoolSettings): ForwardProxy = apply(connectionContext.asInstanceOf[HttpsConnectionContext], poolSettings.asInstanceOf[ConnectionPoolSettings]) } @@ -274,12 +274,12 @@ final case class ForwardProxy @InternalApi private (connectionContext: HttpsConn poolSettings: ConnectionPoolSettings) { def getConnectionContext: jh.HttpsConnectionContext = connectionContext def getPoolSettings: jh.settings.ConnectionPoolSettings = poolSettings - def withConnectionContext(connectionContext: HttpsConnectionContext) = + def withConnectionContext(connectionContext: HttpsConnectionContext): ForwardProxy = copy(connectionContext = connectionContext) - def withConnectionContext(connectionContext: jh.HttpsConnectionContext) = + def withConnectionContext(connectionContext: jh.HttpsConnectionContext): ForwardProxy = copy(connectionContext = connectionContext.asInstanceOf[HttpsConnectionContext]) - def withPoolSettings(poolSettings: ConnectionPoolSettings) = + def withPoolSettings(poolSettings: ConnectionPoolSettings): ForwardProxy = copy(poolSettings = poolSettings) - def withPoolSettings(poolSettings: jh.settings.ConnectionPoolSettings) = + def withPoolSettings(poolSettings: jh.settings.ConnectionPoolSettings): ForwardProxy = copy(poolSettings = poolSettings.asInstanceOf[ConnectionPoolSettings]) } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala index 20fbf7004..e7531c743 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala @@ -74,7 +74,6 @@ private[connectors] object PaginatedRequest { .mapMaterializedValue(_ => NotUsed) } - private def addPageToken(request: HttpRequest, query: Query): String => HttpRequest = { pageToken => + private def addPageToken(request: HttpRequest, query: Query): String => HttpRequest = pageToken => request.withUri(request.uri.withQuery(Query.Cons("pageToken", pageToken, query))) - } } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala index c6a78b9cb..14589bbe8 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala @@ -181,9 +181,9 @@ private[connectors] object ResumableUpload { Flow[ByteString] .statefulMap(() => ByteString.newBuilder)((chunkBuilder, bytes) => { chunkBuilder ++= bytes - if (chunkBuilder.length < chunkSize) { + if (chunkBuilder.length < chunkSize) (chunkBuilder, ByteString.empty) - } else if (chunkBuilder.length == chunkSize) { + else if (chunkBuilder.length == chunkSize) { val chunk = chunkBuilder.result() chunkBuilder.clear() (chunkBuilder, chunk) diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ComputeEngineCredentials.scala 
b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ComputeEngineCredentials.scala index 3709b096d..461f675c3 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ComputeEngineCredentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ComputeEngineCredentials.scala @@ -25,11 +25,10 @@ import scala.concurrent.Future @InternalApi private[auth] object ComputeEngineCredentials { - def apply(scopes: Set[String])(implicit system: ClassicActorSystemProvider): Future[Credentials] = { + def apply(scopes: Set[String])(implicit system: ClassicActorSystemProvider): Future[Credentials] = GoogleComputeMetadata .getProjectId() .map(projectId => new ComputeEngineCredentials(projectId, scopes))(system.classicSystem.dispatcher) - } } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/Credentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/Credentials.scala index 7d1ec3e24..ac9062513 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/Credentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/Credentials.scala @@ -100,7 +100,7 @@ object Credentials { * Credentials for accessing Google APIs */ @DoNotInherit -abstract class Credentials private[auth] () { +abstract class Credentials private[auth] { private[google] def projectId: String diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala index bbd9e848d..6282b2816 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala @@ -45,11 +45,10 @@ private[auth] final class GoogleOAuth2Credentials(credentials: OAuth2Credentials } } - private def requestMetadata(implicit ec: ExecutionContext): Future[util.Map[String, util.List[String]]] = { + private def requestMetadata(implicit ec: ExecutionContext): Future[util.Map[String, util.List[String]]] = credentials.get().map { token => util.Collections.singletonMap("Authorization", util.Collections.singletonList(token.toString)) } - } override def refresh(): Unit = credentials.refresh() } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2Credentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2Credentials.scala index 15260873a..c9cb2c20e 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2Credentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2Credentials.scala @@ -30,7 +30,7 @@ import scala.concurrent.{ ExecutionContext, Future, Promise } private[auth] object OAuth2Credentials { sealed abstract class Command final case class TokenRequest(promise: Promise[OAuth2BearerToken], settings: RequestSettings) extends Command - final case object ForceRefresh extends Command + case object ForceRefresh extends Command } @InternalApi @@ -61,7 +61,7 @@ private[auth] abstract class OAuth2Credentials(val projectId: String)(implicit m Int.MaxValue, OverflowStrategy.fail) .to( - Sink.fromMaterializer { (mat, attr) => + Sink.fromMaterializer { (mat, _) => 
Sink.foldAsync(Option.empty[AccessToken]) { case (cachedToken @ Some(token), TokenRequest(promise, _)) if !token.expiresSoon()(Clock.systemUTC()) => promise.success(OAuth2BearerToken(token.token)) diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala index 7e69fb4e6..6381d1219 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala @@ -35,12 +35,12 @@ private[connectors] object ServiceAccountCredentials { def apply(c: Config, scopes: Set[String])(implicit system: ClassicActorSystemProvider): Credentials = { val (projectId, clientEmail, privateKey) = { - if (c.getString("private-key").nonEmpty) { + if (c.getString("private-key").nonEmpty) ( c.getString("project-id"), c.getString("client-email"), c.getString("private-key")) - } else { + else { val src = Source.fromFile(c.getString("path")) val credentials = JsonParser(src.mkString).convertTo[ServiceAccountCredentialsFile] src.close() @@ -67,7 +67,6 @@ private final class ServiceAccountCredentials(projectId: String, override protected def getAccessToken()(implicit mat: Materializer, settings: RequestSettings, - clock: Clock): Future[AccessToken] = { + clock: Clock): Future[AccessToken] = GoogleOAuth2.getAccessToken(clientEmail, privateKey, scopes) - } } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala index be1f71ea9..ebb816b04 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala @@ -37,14 +37,14 @@ private[connectors] object UserAccessCredentials { new UserAccessCredentials(clientId, clientSecret, refreshToken, projectId) } - def apply(c: Config)(implicit system: ClassicActorSystemProvider): Credentials = { - if (c.getString("client-id").nonEmpty) { + def apply(c: Config)(implicit system: ClassicActorSystemProvider): Credentials = + if (c.getString("client-id").nonEmpty) apply( clientId = c.getString("client-id"), clientSecret = c.getString("client-secret"), refreshToken = c.getString("refresh-token"), projectId = c.getString("project-id")) - } else { + else { val src = Source.fromFile(c.getString("path")) val credentials = JsonParser(src.mkString).convertTo[UserAccessCredentialsFile] src.close() @@ -54,7 +54,6 @@ private[connectors] object UserAccessCredentials { refreshToken = credentials.refresh_token, projectId = credentials.quota_project_id) } - } final case class UserAccessCredentialsFile(client_id: String, client_secret: String, @@ -73,7 +72,6 @@ private final class UserAccessCredentials(clientId: String, override protected def getAccessToken()(implicit mat: Materializer, settings: RequestSettings, - clock: Clock): Future[AccessToken] = { + clock: Clock): Future[AccessToken] = UserAccessMetadata.getAccessToken(clientId, clientSecret, refreshToken) - } } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/implicits.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/implicits.scala index 
cdaac16c9..67da73641 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/implicits.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/implicits.scala @@ -60,7 +60,7 @@ private[connectors] object implicits { def withDefaultRetry: FromResponseUnmarshaller[Throwable] = Unmarshaller.withMaterializer { implicit ec => implicit mat => response => um(response).map { - case ex => + ex => response.status match { case TooManyRequests | InternalServerError | BadGateway | ServiceUnavailable | GatewayTimeout => Retry(ex) case _ => ex diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/scaladsl/`X-Upload-Content-Type`.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/scaladsl/`X-Upload-Content-Type`.scala index e7b85c461..449053dc3 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/scaladsl/`X-Upload-Content-Type`.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/scaladsl/`X-Upload-Content-Type`.scala @@ -40,7 +40,7 @@ final case class `X-Upload-Content-Type` private[connectors] (contentType: Conte override def value(): String = contentType.toString() override def renderInRequests(): Boolean = true override def renderInResponses(): Boolean = false - override def companion = `X-Upload-Content-Type` + override def companion: `X-Upload-Content-Type`.type = `X-Upload-Content-Type` /** * Java API diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/util/Retry.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/util/Retry.scala index 3ba2b9b26..51557b237 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/util/Retry.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/util/Retry.scala @@ -48,7 +48,7 @@ object Retry { def create(ex: Throwable): Throwable = apply(ex) /** - * A wrapper around Akka's [[pekko.pattern.RetrySupport]] which requires opt-in. + * A wrapper around Pekko's [[pekko.pattern.RetrySupport]] which requires opt-in. * An exception will trigger a retry only if it is wrapped in [[Retry]]. * Note that the exception will be unwrapped, should all the retry attempts fail * (i.e., this method will never raise a [[Retry]], only its underlying exception). 
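
[Editor's note] The google-common hunks above mostly annotate the fluent `withX`/`getX` members of the settings classes and bridge between Scala Option and java.util.Optional. A small sketch of that API shape follows; ExampleSettings is a hypothetical class (not from the patch), and it assumes Scala 2.13's scala.jdk.OptionConverters, whereas the project may route these conversions through its own converter shims.

import java.util.Optional
import scala.jdk.OptionConverters._

// Hypothetical settings class showing the shape the hunks above annotate:
// an immutable case class, fluent withX copies with explicit return types,
// and Java-facing getters/overloads bridged via toJava / toScala.
final case class ExampleSettings(quotaUser: Option[String], prettyPrint: Boolean) {
  def getQuotaUser: Optional[String] = quotaUser.toJava
  def getPrettyPrint: Boolean = prettyPrint

  def withQuotaUser(quotaUser: Option[String]): ExampleSettings = copy(quotaUser = quotaUser)
  def withQuotaUser(quotaUser: Optional[String]): ExampleSettings = copy(quotaUser = quotaUser.toScala)
  def withPrettyPrint(prettyPrint: Boolean): ExampleSettings = copy(prettyPrint = prettyPrint)
}

// Example use: ExampleSettings(None, prettyPrint = false).withQuotaUser(Optional.of("me"))
// Spelling out the return types keeps later inference changes from silently altering the public API.
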
diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala index 446336dd9..c6e183871 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala @@ -47,7 +47,7 @@ class OAuth2CredentialsSpec implicit val settings: RequestSettings = GoogleSettings().requestSettings implicit val clock: Clock = Clock.systemUTC() - final object AccessTokenProvider { + object AccessTokenProvider { @volatile var accessTokenPromise: Promise[AccessToken] = Promise.failed(new RuntimeException) } diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala index cc37d94f2..6bf58d982 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala @@ -51,7 +51,8 @@ class GoogleHttpSpec with ScalaFutures with MockitoSugar { - override def afterAll(): Unit = TestKit.shutdownActorSystem(system) + override def afterAll(): Unit = + TestKit.shutdownActorSystem(system) def mockHttp: HttpExt = { val http = mock[HttpExt] diff --git a/google-fcm/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes b/google-fcm/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes new file mode 100644 index 000000000..26f784835 --- /dev/null +++ b/google-fcm/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes @@ -0,0 +1,32 @@ +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$AndroidConfig") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$AndroidNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$ApnsConfig") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$BasicNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$Condition") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$Condition$And") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$Condition$Not") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$Condition$Or") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$Condition$Topic") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$Token") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$Topic") 
+ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$WebPushConfig") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.FcmNotificationModels$WebPushNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.AndroidConfig") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.AndroidNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.ApnsConfig") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.ApnsFcmOptions") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.BasicNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Color") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Condition") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Condition$And") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Condition$Not") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Condition$Or") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Condition$Topic") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.FcmNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.FcmOptions") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.LightSettings") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Token") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Topic") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.WebPushConfig") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.WebPushFcmOptions") diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmNotificationModels.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmNotificationModels.scala index 4b7ed8263..5a26eb92e 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmNotificationModels.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmNotificationModels.scala @@ -23,13 +23,13 @@ object FcmNotificationModels { /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.BasicNotification */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.BasicNotification", "Alpakka 3.0.2") @Deprecated - case class BasicNotification(title: String, body: String) + final case class BasicNotification(title: String, body: String) /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.AndroidNotification */ @deprecated("Use 
org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.AndroidNotification", "Alpakka 3.0.2") @Deprecated - case class AndroidNotification( + final case class AndroidNotification( title: String, body: String, icon: String, @@ -45,7 +45,7 @@ object FcmNotificationModels { /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.AndroidConfig */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.AndroidConfig", "Alpakka 3.0.2") @Deprecated - case class AndroidConfig( + final case class AndroidConfig( collapse_key: String, priority: AndroidMessagePriority, ttl: String, @@ -72,17 +72,18 @@ object FcmNotificationModels { /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.WebPushConfig */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.WebPushConfig", "Alpakka 3.0.2") @Deprecated - case class WebPushNotification(title: String, body: String, icon: String) + final case class WebPushNotification(title: String, body: String, icon: String) /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.WebPushConfig */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.WebPushConfig", "Alpakka 3.0.2") @Deprecated - case class WebPushConfig(headers: Map[String, String], data: Map[String, String], notification: WebPushNotification) + final case class WebPushConfig(headers: Map[String, String], data: Map[String, String], + notification: WebPushNotification) /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.ApnsConfig */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.ApnsConfig", "Alpakka 3.0.2") @Deprecated - case class ApnsConfig(headers: Map[String, String], rawPayload: String) + final case class ApnsConfig(headers: Map[String, String], rawPayload: String) /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.NotificationTarget */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.NotificationTarget", @@ -93,38 +94,38 @@ object FcmNotificationModels { /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Token */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Token", "Alpakka 3.0.2") @Deprecated - case class Token(token: String) extends NotificationTarget + final case class Token(token: String) extends NotificationTarget /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Topic */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Topic", "Alpakka 3.0.2") @Deprecated - case class Topic(topic: String) extends NotificationTarget + final case class Topic(topic: String) extends NotificationTarget /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Condition */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Condition", "Alpakka 3.0.2") @Deprecated - case class Condition(conditionText: String) extends NotificationTarget + final case class Condition(conditionText: String) extends NotificationTarget /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Condition */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.Condition", "Alpakka 3.0.2") @Deprecated object Condition { sealed trait ConditionBuilder { - def &&(condition: ConditionBuilder) = And(this, condition) - def ||(condition: ConditionBuilder) = Or(this, condition) - def unary_! 
= Not(this) + def &&(condition: ConditionBuilder): And = And(this, condition) + def ||(condition: ConditionBuilder): Or = Or(this, condition) + def unary_! : Not = Not(this) def toConditionText: String } - case class Topic(topic: String) extends ConditionBuilder { + final case class Topic(topic: String) extends ConditionBuilder { def toConditionText: String = s"'$topic' in topics" } - case class And(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { + final case class And(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { def toConditionText: String = s"(${condition1.toConditionText} && ${condition2.toConditionText})" } - case class Or(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { + final case class Or(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { def toConditionText: String = s"(${condition1.toConditionText} || ${condition2.toConditionText})" } - case class Not(condition: ConditionBuilder) extends ConditionBuilder { + final case class Not(condition: ConditionBuilder) extends ConditionBuilder { def toConditionText: String = s"!(${condition.toConditionText})" } @@ -136,7 +137,7 @@ object FcmNotificationModels { /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.FcmNotification */ @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.FcmNotification", "Alpakka 3.0.2") @Deprecated -case class FcmNotification( +final case class FcmNotification( data: Option[Map[String, String]] = None, notification: Option[BasicNotification] = None, android: Option[AndroidConfig] = None, @@ -172,7 +173,8 @@ object FcmNotification { empty.withBasicNotification(notification).withTarget(target) def apply(title: String, body: String, target: NotificationTarget): FcmNotification = empty.withBasicNotification(title, body).withTarget(target) - def basic(title: String, body: String, target: NotificationTarget) = FcmNotification(title, body, target) + def basic(title: String, body: String, target: NotificationTarget): FcmNotification = + FcmNotification(title, body, target) } /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models.FcmResponse */ diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmSettings.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmSettings.scala index 99b90869d..fa06b5be1 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmSettings.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmSettings.scala @@ -166,13 +166,13 @@ final class ForwardProxyCredentials private (val username: String, val password: /** Java API */ def getPassword: String = password - def withUsername(username: String) = copy(username = username) - def withPassword(password: String) = copy(password = password) + def withUsername(username: String): ForwardProxyCredentials = copy(username = username) + def withPassword(password: String): ForwardProxyCredentials = copy(password = password) private def copy(username: String = username, password: String = password) = new ForwardProxyCredentials(username, password) - override def toString = + override def toString: String = "ForwardProxyCredentials(" + s"username=$username," + s"password=******" + @@ -198,29 +198,29 @@ final class ForwardProxyCredentials private (val username: String, val 
password: object ForwardProxy { /** Scala API */ - def apply(host: String, port: Int) = + def apply(host: String, port: Int): ForwardProxy = new ForwardProxy(host, port, Option.empty, Option.empty) - def apply(host: String, port: Int, credentials: Option[ForwardProxyCredentials]) = + def apply(host: String, port: Int, credentials: Option[ForwardProxyCredentials]): ForwardProxy = new ForwardProxy(host, port, credentials, Option.empty) def apply(host: String, port: Int, credentials: Option[ForwardProxyCredentials], - trustPem: Option[ForwardProxyTrustPem]) = + trustPem: Option[ForwardProxyTrustPem]): ForwardProxy = new ForwardProxy(host, port, credentials, trustPem) /** Java API */ - def create(host: String, port: Int) = + def create(host: String, port: Int): ForwardProxy = apply(host, port) - def create(host: String, port: Int, credentials: Option[ForwardProxyCredentials]) = + def create(host: String, port: Int, credentials: Option[ForwardProxyCredentials]): ForwardProxy = apply(host, port, credentials) def create(host: String, port: Int, credentials: Option[ForwardProxyCredentials], - trustPem: Option[ForwardProxyTrustPem]) = + trustPem: Option[ForwardProxyTrustPem]): ForwardProxy = apply(host, port, credentials, trustPem) } @@ -246,9 +246,9 @@ final class ForwardProxy private (val host: String, def getForwardProxyTrustPem: java.util.Optional[ForwardProxyTrustPem] = trustPem.toJava - def withHost(host: String) = copy(host = host) - def withPort(port: Int) = copy(port = port) - def withCredentials(credentials: ForwardProxyCredentials) = copy(credentials = Option(credentials)) + def withHost(host: String): ForwardProxy = copy(host = host) + def withPort(port: Int): ForwardProxy = copy(port = port) + def withCredentials(credentials: ForwardProxyCredentials): ForwardProxy = copy(credentials = Option(credentials)) private def copy(host: String = host, port: Int = port, @@ -256,7 +256,7 @@ final class ForwardProxy private (val host: String, trustPem: Option[ForwardProxyTrustPem] = trustPem) = new ForwardProxy(host, port, credentials, trustPem) - override def toString = + override def toString: String = "ForwardProxy(" + s"host=$host," + s"port=$port," + @@ -333,16 +333,14 @@ object FcmSettings { */ @deprecated("Use org.apache.pekko.stream.connectors.google.GoogleSettings", "Alpakka 3.0.0") @Deprecated - def create(clientEmail: String, privateKey: String, projectId: String): FcmSettings = { + def create(clientEmail: String, privateKey: String, projectId: String): FcmSettings = apply(clientEmail, privateKey, projectId) - } /** * @deprecated Use [[pekko.stream.connectors.google.GoogleSettings]] */ @deprecated("Use org.apache.pekko.stream.connectors.google.GoogleSettings", "Alpakka 3.0.0") @Deprecated - def create(clientEmail: String, privateKey: String, projectId: String, forwardProxy: ForwardProxy): FcmSettings = { + def create(clientEmail: String, privateKey: String, projectId: String, forwardProxy: ForwardProxy): FcmSettings = apply(clientEmail, privateKey, projectId, forwardProxy) - } } diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala index 4e1768204..ca5104386 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala @@ -27,7 +27,7 @@ import 
spray.json._ @InternalApi @deprecated("Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.impl.FcmSend", "Alpakka 3.0.2") @Deprecated -private[fcm] case class FcmSend(validate_only: Boolean, message: FcmNotification) +private[fcm] final case class FcmSend(validate_only: Boolean, message: FcmNotification) /** * INTERNAL API @@ -41,14 +41,14 @@ private[fcm] object FcmJsonSupport extends DefaultJsonProtocol with SprayJsonSup implicit object FcmSuccessResponseJsonFormat extends RootJsonFormat[FcmSuccessResponse] { def write(c: FcmSuccessResponse): JsValue = JsString(c.name) - def read(value: JsValue) = value match { + def read(value: JsValue): FcmSuccessResponse = value match { case JsObject(fields) if fields.contains("name") => FcmSuccessResponse(fields("name").convertTo[String]) case other => throw DeserializationException(s"object containing `name` expected, but we get $other") } } implicit object FcmErrorResponseJsonFormat extends RootJsonFormat[FcmErrorResponse] { def write(c: FcmErrorResponse): JsValue = c.rawError.parseJson - def read(value: JsValue) = FcmErrorResponse(value.toString) + def read(value: JsValue): FcmErrorResponse = FcmErrorResponse(value.toString) } implicit object FcmResponseFormat extends RootJsonReader[FcmResponse] { diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmSender.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmSender.scala index 859543def..21be14113 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmSender.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmSender.scala @@ -56,11 +56,10 @@ private[fcm] class FcmSender { implicit private val unmarshaller: FromResponseUnmarshaller[FcmSuccessResponse] = Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => - if (response.status.isSuccess) { + if (response.status.isSuccess) Unmarshal(response.entity).to[FcmSuccessResponse] - } else { + else Unmarshal(response.entity).to[FcmErrorResponse].map(error => throw FcmErrorException(error)) - } }.withDefaultRetry /** Use org.apache.pekko.stream.connectors.google.firebase.fcm.v1.impl.FcmErrorException */ diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala index fdc9008b4..0edd572c9 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala @@ -23,7 +23,7 @@ import spray.json._ * INTERNAL API */ @InternalApi -private[fcm] case class FcmSend(validate_only: Boolean, message: FcmNotification) +private[fcm] final case class FcmSend(validate_only: Boolean, message: FcmNotification) /** * INTERNAL API @@ -35,14 +35,14 @@ private[fcm] object FcmJsonSupport extends DefaultJsonProtocol with SprayJsonSup implicit object FcmSuccessResponseJsonFormat extends RootJsonFormat[FcmSuccessResponse] { def write(c: FcmSuccessResponse): JsValue = JsString(c.name) - def read(value: JsValue) = value match { + def read(value: JsValue): FcmSuccessResponse = value match { case JsObject(fields) if fields.contains("name") => FcmSuccessResponse(fields("name").convertTo[String]) case other => throw 
DeserializationException(s"object containing `name` expected, but we get $other") } } implicit object FcmErrorResponseJsonFormat extends RootJsonFormat[FcmErrorResponse] { def write(c: FcmErrorResponse): JsValue = c.rawError.parseJson - def read(value: JsValue) = FcmErrorResponse(value.toString) + def read(value: JsValue): FcmErrorResponse = FcmErrorResponse(value.toString) } implicit object FcmResponseFormat extends RootJsonReader[FcmResponse] { diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSender.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSender.scala index 66e96560c..486e7f04c 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSender.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSender.scala @@ -51,11 +51,10 @@ private[fcm] class FcmSender { implicit private val unmarshaller: FromResponseUnmarshaller[FcmSuccessResponse] = Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => - if (response.status.isSuccess) { + if (response.status.isSuccess) Unmarshal(response.entity).to[FcmSuccessResponse] - } else { + else Unmarshal(response.entity).to[FcmErrorResponse].map(error => throw FcmErrorException(error)) - } }.withDefaultRetry private case class FcmErrorException(error: FcmErrorResponse) extends Exception diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/AndroidConfig.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/AndroidConfig.scala index 96147b87e..01f855f6c 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/AndroidConfig.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/AndroidConfig.scala @@ -17,7 +17,7 @@ package org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models * AndroidConfig model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#AndroidConfig */ -case class AndroidConfig( +final case class AndroidConfig( collapse_key: Option[String] = None, priority: Option[AndroidMessagePriority] = None, ttl: Option[String] = None, @@ -53,7 +53,7 @@ object AndroidConfig { * AndroidNotification model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#AndroidNotification */ -case class AndroidNotification( +final case class AndroidNotification( title: Option[String] = None, body: Option[String] = None, icon: Option[String] = None, @@ -181,7 +181,7 @@ case object Secret extends Visibility * LightSettings model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#LightSettings */ -case class LightSettings( +final case class LightSettings( color: Option[Color] = None, light_on_duration: Option[String] = None, light_off_duration: Option[String] = None) { @@ -202,4 +202,4 @@ object LightSettings { * Color model. 
* @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#Color */ -case class Color(red: Double, green: Double, blue: Double, alpha: Double) +final case class Color(red: Double, green: Double, blue: Double, alpha: Double) diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/ApnsConfig.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/ApnsConfig.scala index 3dc4a9528..878b7987f 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/ApnsConfig.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/ApnsConfig.scala @@ -17,7 +17,7 @@ package org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models * ApnsConfig model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#ApnsConfig */ -case class ApnsConfig( +final case class ApnsConfig( headers: Option[Map[String, String]] = None, payload: Option[String] = None, fcm_options: Option[FcmOption] = None) { diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/BasicNotification.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/BasicNotification.scala index 4c0d1f0b2..50937ae62 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/BasicNotification.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/BasicNotification.scala @@ -17,7 +17,7 @@ package org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models * Notification model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#Notification */ -case class BasicNotification(title: String, body: String, image: Option[String] = None) { +final case class BasicNotification(title: String, body: String, image: Option[String] = None) { def withTitle(value: String): BasicNotification = this.copy(title = value) def withBody(value: String): BasicNotification = this.copy(body = value) def withImage(value: String): BasicNotification = this.copy(image = Option(value)) diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/FcmNotification.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/FcmNotification.scala index 88b51ec70..a80461edb 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/FcmNotification.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/FcmNotification.scala @@ -17,7 +17,7 @@ package org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models * Firebase Cloud Message model. 
* @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages */ -case class FcmNotification( +final case class FcmNotification( data: Option[Map[String, String]] = None, notification: Option[BasicNotification] = None, android: Option[AndroidConfig] = None, diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/FcmOption.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/FcmOption.scala index a33975633..88be73707 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/FcmOption.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/FcmOption.scala @@ -19,7 +19,7 @@ sealed trait FcmOption * FcmOptions model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#FcmOptions */ -case class FcmOptions(analytics_label: String) extends FcmOption +final case class FcmOptions(analytics_label: String) extends FcmOption object FcmOptions { def create(value: String): FcmOptions = FcmOptions(value) @@ -29,7 +29,8 @@ object FcmOptions { * ApnsFcmOptions model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#ApnsFcmOptions */ -case class ApnsFcmOptions(analytics_label: Option[String] = None, image: Option[String] = None) extends FcmOption { +final case class ApnsFcmOptions( + analytics_label: Option[String] = None, image: Option[String] = None) extends FcmOption { def withAnalyticsLabel(value: String): ApnsFcmOptions = this.copy(analytics_label = Option(value)) def withImage(value: String): ApnsFcmOptions = this.copy(image = Option(value)) } @@ -42,7 +43,8 @@ object ApnsFcmOptions { * WebpushFcmOptions model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#WebpushFcmOptions */ -case class WebPushFcmOptions(analytics_label: Option[String] = None, link: Option[String] = None) extends FcmOption { +final case class WebPushFcmOptions( + analytics_label: Option[String] = None, link: Option[String] = None) extends FcmOption { def withAnalyticsLabel(value: String): WebPushFcmOptions = this.copy(analytics_label = Option(value)) def withLink(value: String): WebPushFcmOptions = this.copy(link = Option(value)) } diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/NotificationTarget.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/NotificationTarget.scala index 54a78a019..d4d0c1a2e 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/NotificationTarget.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/NotificationTarget.scala @@ -19,37 +19,37 @@ sealed trait NotificationTarget * Token model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages */ -case class Token(token: String) extends NotificationTarget +final case class Token(token: String) extends NotificationTarget /** * Topic model. * @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages */ -case class Topic(topic: String) extends NotificationTarget +final case class Topic(topic: String) extends NotificationTarget /** * Condition model. 
* @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages */ -case class Condition(conditionText: String) extends NotificationTarget +final case class Condition(conditionText: String) extends NotificationTarget object Condition { sealed trait ConditionBuilder { - def &&(condition: ConditionBuilder) = And(this, condition) - def ||(condition: ConditionBuilder) = Or(this, condition) - def unary_! = Not(this) + def &&(condition: ConditionBuilder): And = And(this, condition) + def ||(condition: ConditionBuilder): Or = Or(this, condition) + def unary_! : Not = Not(this) def toConditionText: String } - case class Topic(topic: String) extends ConditionBuilder { + final case class Topic(topic: String) extends ConditionBuilder { def toConditionText: String = s"'$topic' in topics" } - case class And(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { + final case class And(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { def toConditionText: String = s"(${condition1.toConditionText} && ${condition2.toConditionText})" } - case class Or(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { + final case class Or(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { def toConditionText: String = s"(${condition1.toConditionText} || ${condition2.toConditionText})" } - case class Not(condition: ConditionBuilder) extends ConditionBuilder { + final case class Not(condition: ConditionBuilder) extends ConditionBuilder { def toConditionText: String = s"!(${condition.toConditionText})" } diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/WebPushConfig.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/WebPushConfig.scala index ca70f687b..e259f3ef7 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/WebPushConfig.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/models/WebPushConfig.scala @@ -17,7 +17,7 @@ package org.apache.pekko.stream.connectors.google.firebase.fcm.v1.models * WebpushConfig model. 
* @see https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages#WebpushConfig */ -case class WebPushConfig(headers: Option[Map[String, String]] = None, +final case class WebPushConfig(headers: Option[Map[String, String]] = None, data: Option[Map[String, String]] = None, notification: Option[String] = None, fcm_options: Option[FcmOption] = None) { diff --git a/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala b/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala index 8b8e46a51..5d7ec4fa2 100644 --- a/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala +++ b/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala @@ -73,7 +73,7 @@ class FcmExamples { headers = Option(Map.empty), data = Option(Map.empty), notification = - Option("{\"title\": \"web-title\", \"body\": \"web-body\", \"icon\": \"http://example.com/icon.png\"}"))) + Option("{\"title\": \"web-title\", \"body\": \"web-body\", \"icon\": \"https://example.com/icon.png\"}"))) val sendable = buildedNotification.isSendable // #noti-create diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/HTableSettings.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/HTableSettings.scala index 1f58c5a57..9cd5d5ade 100644 --- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/HTableSettings.scala +++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/HTableSettings.scala @@ -75,7 +75,7 @@ object HTableSettings { def apply[T](conf: Configuration, tableName: TableName, columnFamilies: immutable.Seq[String], - converter: T => immutable.Seq[Mutation]) = + converter: T => immutable.Seq[Mutation]): HTableSettings[T] = new HTableSettings(conf, tableName, columnFamilies, converter) /** diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseCapabilities.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseCapabilities.scala index 354cca249..6f6c632b8 100644 --- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseCapabilities.scala +++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseCapabilities.scala @@ -30,16 +30,15 @@ import scala.language.postfixOps private[impl] trait HBaseCapabilities { this: StageLogging => - def twr[A <: Closeable, B](resource: A)(doWork: A => B): Try[B] = + private def twr[A <: Closeable, B](resource: A)(doWork: A => B): Try[B] = try { Success(doWork(resource)) } catch { case e: Exception => Failure(e) } finally { try { - if (resource != null) { + if (resource != null) resource.close() - } } catch { case e: Exception => log.error(e, e.getMessage) // should be logged } @@ -52,7 +51,7 @@ private[impl] trait HBaseCapabilities { this: StageLogging => * @param timeout in second * @return */ - def connect(conf: Configuration, timeout: Int = 10) = + def connect(conf: Configuration, timeout: Int = 10): Connection = Await.result(Future(ConnectionFactory.createConnection(conf)), timeout seconds) private[impl] def getOrCreateTable(tableName: TableName, columnFamilies: Seq[String])( diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala index b18579e45..58865fa67 100644 --- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala +++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala @@ -29,12 +29,12 @@ private[hbase] class 
HBaseFlowStage[A](settings: HTableSettings[A]) extends Grap private val in = Inlet[A]("messages") private val out = Outlet[A]("result") - override val shape = FlowShape(in, out) + override val shape: FlowShape[A, A] = FlowShape(in, out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with StageLogging with HBaseCapabilities { - override protected def logSource = classOf[HBaseFlowStage[A]] + override protected def logSource: Class[HBaseFlowStage[A]] = classOf[HBaseFlowStage[A]] implicit val connection: Connection = connect(settings.conf) @@ -42,14 +42,14 @@ private[hbase] class HBaseFlowStage[A](settings: HTableSettings[A]) extends Grap setHandler(out, new OutHandler { - override def onPull() = + override def onPull(): Unit = pull(in) }) setHandler( in, new InHandler { - override def onPush() = { + override def onPush(): Unit = { val msg = grab(in) val mutations = settings.converter(msg) @@ -69,7 +69,7 @@ private[hbase] class HBaseFlowStage[A](settings: HTableSettings[A]) extends Grap }) - override def postStop() = { + override def postStop(): Unit = { log.debug("Stage completed") try { table.close() diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala index 7c8a30bcc..b9449885f 100644 --- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala +++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala @@ -43,7 +43,7 @@ private[hbase] final class HBaseSourceLogic[A](scan: Scan, implicit val connection: Connection = connect(settings.conf) lazy val table: Table = getOrCreateTable(settings.tableName, settings.columnFamilies).get - private var results: java.util.Iterator[Result] = null + private var results: java.util.Iterator[Result] = _ setHandler(out, this) @@ -65,10 +65,9 @@ private[hbase] final class HBaseSourceLogic[A](scan: Scan, } override def onPull(): Unit = - if (results.hasNext) { + if (results.hasNext) emit(out, results.next) - } else { + else completeStage() - } } diff --git a/hbase/src/test/java/docs/javadsl/HBaseStageTest.java b/hbase/src/test/java/docs/javadsl/HBaseStageTest.java index eec183399..3c3449436 100644 --- a/hbase/src/test/java/docs/javadsl/HBaseStageTest.java +++ b/hbase/src/test/java/docs/javadsl/HBaseStageTest.java @@ -34,6 +34,7 @@ import org.junit.Test; import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -63,89 +64,64 @@ public static void teardown() { // #create-converter-put Function> hBaseConverter = person -> { - try { - Put put = new Put(String.format("id_%d", person.id).getBytes("UTF-8")); + Put put = new Put(String.format("id_%d", person.id).getBytes(StandardCharsets.UTF_8)); put.addColumn( - "info".getBytes("UTF-8"), "name".getBytes("UTF-8"), person.name.getBytes("UTF-8")); + "info".getBytes(StandardCharsets.UTF_8), "name".getBytes(StandardCharsets.UTF_8), person.name.getBytes(StandardCharsets.UTF_8)); return Collections.singletonList(put); - } catch (UnsupportedEncodingException e) { - e.printStackTrace(); - return Collections.emptyList(); - } }; // #create-converter-put // #create-converter-append Function> appendHBaseConverter = person -> { - try { - Append append = new Append(String.format("id_%d", person.id).getBytes("UTF-8")); + Append append = new 
Append(String.format("id_%d", person.id).getBytes(StandardCharsets.UTF_8)); append.add( - "info".getBytes("UTF-8"), "aliases".getBytes("UTF-8"), person.name.getBytes("UTF-8")); + "info".getBytes(StandardCharsets.UTF_8), "aliases".getBytes(StandardCharsets.UTF_8), person.name.getBytes(StandardCharsets.UTF_8)); return Collections.singletonList(append); - } catch (UnsupportedEncodingException e) { - e.printStackTrace(); - return Collections.emptyList(); - } }; // #create-converter-append // #create-converter-delete Function> deleteHBaseConverter = person -> { - try { - Delete delete = new Delete(String.format("id_%d", person.id).getBytes("UTF-8")); + Delete delete = new Delete(String.format("id_%d", person.id).getBytes(StandardCharsets.UTF_8)); return Collections.singletonList(delete); - } catch (UnsupportedEncodingException e) { - e.printStackTrace(); - return Collections.emptyList(); - } }; // #create-converter-delete // #create-converter-increment Function> incrementHBaseConverter = person -> { - try { - Increment increment = new Increment(String.format("id_%d", person.id).getBytes("UTF-8")); - increment.addColumn("info".getBytes("UTF-8"), "numberOfChanges".getBytes("UTF-8"), 1); + Increment increment = new Increment(String.format("id_%d", person.id).getBytes(StandardCharsets.UTF_8)); + increment.addColumn("info".getBytes(StandardCharsets.UTF_8), "numberOfChanges".getBytes(StandardCharsets.UTF_8), 1); return Collections.singletonList(increment); - } catch (UnsupportedEncodingException e) { - e.printStackTrace(); - return Collections.emptyList(); - } }; // #create-converter-increment // #create-converter-complex Function> complexHBaseConverter = person -> { - try { - byte[] id = String.format("id_%d", person.id).getBytes("UTF-8"); - byte[] infoFamily = "info".getBytes("UTF-8"); + byte[] id = String.format("id_%d", person.id).getBytes(StandardCharsets.UTF_8); + byte[] infoFamily = "info".getBytes(StandardCharsets.UTF_8); if (person.id != 0 && person.name.isEmpty()) { Delete delete = new Delete(id); return Collections.singletonList(delete); } else if (person.id != 0) { Put put = new Put(id); - put.addColumn(infoFamily, "name".getBytes("UTF-8"), person.name.getBytes("UTF-8")); + put.addColumn(infoFamily, "name".getBytes(StandardCharsets.UTF_8), person.name.getBytes(StandardCharsets.UTF_8)); Increment increment = new Increment(id); - increment.addColumn(infoFamily, "numberOfChanges".getBytes("UTF-8"), 1); + increment.addColumn(infoFamily, "numberOfChanges".getBytes(StandardCharsets.UTF_8), 1); return Arrays.asList(put, increment); } else { return Collections.emptyList(); } - } catch (UnsupportedEncodingException e) { - e.printStackTrace(); - return Collections.emptyList(); - } }; // #create-converter-complex @@ -213,7 +189,7 @@ public void readFromSource() assertEquals(Done.getInstance(), o.toCompletableFuture().get(5, TimeUnit.SECONDS)); // #source - Scan scan = new Scan(new Get("id_300".getBytes("UTF-8"))); + Scan scan = new Scan(new Get("id_300".getBytes(StandardCharsets.UTF_8))); CompletionStage> f = HTableStage.source(scan, tableSettings).runWith(Sink.seq(), system); diff --git a/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala b/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala index a6a867107..7fb1e1578 100644 --- a/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala +++ b/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala @@ -82,8 +82,8 @@ class HBaseStageSpec // #create-converter-increment // #create-converter-complex - val mutationsHBaseConverter: Person => 
immutable.Seq[Mutation] = { person => - if (person.id != 0) { + val mutationsHBaseConverter: Person => immutable.Seq[Mutation] = person => + if (person.id != 0) if (person.name.isEmpty) { // Delete the specified row val delete = new Delete(s"id_${person.id}") @@ -98,10 +98,8 @@ class HBaseStageSpec List(put, increment) } - } else { + else List.empty - } - } // #create-converter-complex // #create-settings diff --git a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/impl/HdfsFlowStage.scala b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/impl/HdfsFlowStage.scala index fcf8410b2..89b156ad2 100644 --- a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/impl/HdfsFlowStage.scala +++ b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/impl/HdfsFlowStage.scala @@ -103,9 +103,8 @@ private[hdfs] final class HdfsFlowLogic[W, I, C]( } private def tryPull(): Unit = - if (!isClosed(inlet) && !hasBeenPulled(inlet)) { + if (!isClosed(inlet) && !hasBeenPulled(inlet)) pull(inlet) - } private def onPushProgram(input: HdfsWriteMessage[I, C]) = for { @@ -167,7 +166,7 @@ private[hdfs] final class HdfsFlowLogic[W, I, C]( */ private def tryRotateOutput: FlowStep[W, I, (Int, Option[RotationMessage])] = FlowStep[W, I, (Int, Option[RotationMessage])] { state => - if (state.rotationStrategy.should()) { + if (state.rotationStrategy.should()) rotateOutput .run(state) .map { @@ -175,9 +174,8 @@ private[hdfs] final class HdfsFlowLogic[W, I, C]( (newState, (state.rotationCount, Some(message))) } .value - } else { + else (state, (state.rotationCount, None)) - } } private def trySyncOutput: FlowStep[W, I, Boolean] = @@ -186,9 +184,8 @@ private[hdfs] final class HdfsFlowLogic[W, I, C]( state.writer.sync() val newSync = state.syncStrategy.reset() (state.copy(syncStrategy = newSync), true) - } else { + } else (state, false) - } } private def tryPush(messages: Seq[OutgoingMessage[C]]): FlowStep[W, I, Unit] = diff --git a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/impl/writer/CompressedDataWriter.scala b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/impl/writer/CompressedDataWriter.scala index 66d8ec94c..9b3e97882 100644 --- a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/impl/writer/CompressedDataWriter.scala +++ b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/impl/writer/CompressedDataWriter.scala @@ -55,7 +55,8 @@ private[writer] final case class CompressedDataWriter( copy(maybeTargetPath = Some(outputFileWithExtension(rotationCount))) } - override protected def create(fs: FileSystem, file: Path): FSDataOutputStream = fs.create(file, overwrite) + override protected def create(fs: FileSystem, file: Path): FSDataOutputStream = + fs.create(file, overwrite) private def outputFileWithExtension(rotationCount: Long): Path = { val candidatePath = createTargetPath(pathGenerator, rotationCount) diff --git a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/javadsl/HdfsSource.scala b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/javadsl/HdfsSource.scala index fd2a98b01..713fea03b 100644 --- a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/javadsl/HdfsSource.scala +++ b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/javadsl/HdfsSource.scala @@ -29,7 +29,7 @@ import org.apache.hadoop.io.compress.CompressionCodec object HdfsSource { /** - * Java API: creates a [[Source]] that consumes as [[ByteString]] + * Java API: creates a [[javadsl.Source]] that consumes as [[ByteString]] * * @param 
fs Hadoop file system * @param path the file to open @@ -40,7 +40,7 @@ object HdfsSource { ScalaHdfsSource.data(fs, path).mapMaterializedValue(_.asJava).asJava /** - * Java API: creates a [[Source]] that consumes as [[ByteString]] + * Java API: creates a [[javadsl.Source]] that consumes as [[ByteString]] * * @param fs Hadoop file system * @param path the file to open @@ -53,7 +53,7 @@ object HdfsSource { ScalaHdfsSource.data(fs, path, chunkSize).mapMaterializedValue(_.asJava).asJava /** - * Java API: creates a [[Source]] that consumes as [[ByteString]] + * Java API: creates a [[javadsl.Source]] that consumes as [[ByteString]] * * @param fs Hadoop file system * @param path the file to open @@ -66,7 +66,7 @@ object HdfsSource { ScalaHdfsSource.compressed(fs, path, codec).mapMaterializedValue(_.asJava).asJava /** - * Java API: creates a [[Source]] that consumes as [[ByteString]] + * Java API: creates a [[javadsl.Source]] that consumes as [[ByteString]] * * @param fs Hadoop file system * @param path the file to open @@ -81,7 +81,7 @@ object HdfsSource { ScalaHdfsSource.compressed(fs, path, codec, chunkSize).mapMaterializedValue(_.asJava).asJava /** - * Java API: creates a [[Source]] that consumes as [[(K, V]] + * Java API: creates a [[javadsl.Source]] that consumes as [[(K, V)]] * * @param fs Hadoop file system * @param path the file to open diff --git a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/model.scala b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/model.scala index 8712307a3..1da78655b 100644 --- a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/model.scala +++ b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/model.scala @@ -48,7 +48,7 @@ final class HdfsWritingSettings private ( lineSeparator = lineSeparator, pathGenerator = pathGenerator) - override def toString = + override def toString: String = "HdfsWritingSettings(" + s"overwrite=$overwrite," + s"newLine=$newLine," + @@ -65,7 +65,7 @@ object HdfsWritingSettings { val default = new HdfsWritingSettings( overwrite = true, newLine = false, - lineSeparator = System.getProperty("line.separator"), + lineSeparator = java.lang.System.lineSeparator(), pathGenerator = DefaultFilePathGenerator) /** Scala API */ @@ -128,10 +128,10 @@ final case class WrittenMessage[P](passThrough: P, inRotation: Int) extends Outg sealed case class FileUnit(byteCount: Long) object FileUnit { - val KB = FileUnit(Math.pow(2, 10).toLong) - val MB = FileUnit(Math.pow(2, 20).toLong) - val GB = FileUnit(Math.pow(2, 30).toLong) - val TB = FileUnit(Math.pow(2, 40).toLong) + val KB: FileUnit = FileUnit(Math.pow(2, 10).toLong) + val MB: FileUnit = FileUnit(Math.pow(2, 20).toLong) + val GB: FileUnit = FileUnit(Math.pow(2, 30).toLong) + val TB: FileUnit = FileUnit(Math.pow(2, 40).toLong) } sealed abstract class FilePathGenerator extends ((Long, Long) => Path) { diff --git a/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala b/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala index 1bfbee788..c82d8db34 100644 --- a/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala +++ b/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala @@ -230,9 +230,9 @@ class HdfsWriterSpec "kafka-example - store data with passThrough" in { // #define-kafka-classes - case class Book(title: String) - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class Book(title: String) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: 
KafkaOffset) // #define-kafka-classes // #kafka-example @@ -519,7 +519,7 @@ class HdfsWriterSpec HdfsWritingSettings() .withOverwrite(true) .withNewLine(false) - .withLineSeparator(System.getProperty("line.separator")) + .withLineSeparator(java.lang.System.lineSeparator()) .withPathGenerator(pathGenerator) // #define-settings settings diff --git a/huawei-push-kit/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes b/huawei-push-kit/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes new file mode 100644 index 000000000..5b1fb5300 --- /dev/null +++ b/huawei-push-kit/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes @@ -0,0 +1,20 @@ +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.AndroidConfig") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.AndroidNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.ApnsConfig") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.BadgeNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.BasicNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.Button") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.ClickAction") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.Color") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.Condition") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.Condition$And") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.Condition$Not") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.Condition$Or") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.Condition$Topic") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.LightSettings") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.PushKitNotification") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.Tokens") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.Topic") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.WebActions") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.WebConfig") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.huawei.pushkit.models.WebNotification") diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/ForwardProxyHttpsContext.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/ForwardProxyHttpsContext.scala index 0036f2f18..0939d2aaf 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/ForwardProxyHttpsContext.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/ForwardProxyHttpsContext.scala @@ -29,17 
+29,16 @@ import javax.net.ssl.{ SSLContext, TrustManagerFactory } @InternalApi private[pushkit] object ForwardProxyHttpsContext { - val SSL = "SSL" - val X509 = "X509" + private val SSL = "SSL" + private val X509 = "X509" implicit class ForwardProxyHttpsContext(forwardProxy: ForwardProxy) { - def httpsContext(system: ActorSystem): HttpsConnectionContext = { + def httpsContext(system: ActorSystem): HttpsConnectionContext = forwardProxy.trustPem match { case Some(trustPem) => createHttpsContext(trustPem) case None => Http()(system).defaultClientHttpsContext } - } } private def createHttpsContext(trustPem: ForwardProxyTrustPem) = { diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/ForwardProxyPoolSettings.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/ForwardProxyPoolSettings.scala index 1269f0d10..bd5f1a977 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/ForwardProxyPoolSettings.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/ForwardProxyPoolSettings.scala @@ -30,7 +30,7 @@ private[pushkit] object ForwardProxyPoolSettings { implicit class ForwardProxyPoolSettings(forwardProxy: ForwardProxy) { - def poolSettings(system: ActorSystem) = { + def poolSettings(system: ActorSystem): ConnectionPoolSettings = { val address = InetSocketAddress.createUnresolved(forwardProxy.host, forwardProxy.port) val transport = forwardProxy.credentials.fold(ClientTransport.httpsProxy(address))(c => ClientTransport.httpsProxy(address, BasicHttpCredentials(c.username, c.password))) diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/HmsSettingExt.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/HmsSettingExt.scala index 019e6d2d4..2c4c9f9b9 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/HmsSettingExt.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/HmsSettingExt.scala @@ -51,7 +51,7 @@ private[pushkit] object HmsSettingExt extends ExtensionId[HmsSettingExt] with Ex def apply()(implicit system: ActorSystem): HmsSettingExt = super.apply(system) - override def lookup = HmsSettingExt + override def lookup: HmsSettingExt.type = HmsSettingExt override def createExtension(system: ExtendedActorSystem) = new HmsSettingExt(system) /** diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/HmsSettings.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/HmsSettings.scala index 4e37007a7..c5dd39060 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/HmsSettings.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/HmsSettings.scala @@ -87,9 +87,8 @@ object HmsSettings { appSecret: String, isTest: Boolean, maxConcurrentConnections: Int, - forwardProxy: ForwardProxy): HmsSettings = { + forwardProxy: ForwardProxy): HmsSettings = apply(appId, appSecret, isTest, maxConcurrentConnections, Option(forwardProxy)) - } def apply( appId: String, @@ -105,7 +104,7 @@ object HmsSettings { appId: String, appSecret: String, isTest: Boolean, - maxConcurrentConnections: Int) = apply(appId, appSecret, isTest, maxConcurrentConnections) + maxConcurrentConnections: Int): HmsSettings = apply(appId, appSecret, isTest, maxConcurrentConnections) def apply( 
appId: String, @@ -116,9 +115,8 @@ object HmsSettings { */ def create( appId: String, - appSecret: String) = { + appSecret: String): HmsSettings = apply(appId, appSecret) - } def apply( appId: String, @@ -132,7 +130,7 @@ object HmsSettings { def create( appId: String, appSecret: String, - forwardProxy: ForwardProxy) = apply(appId, appSecret, Option(forwardProxy)) + forwardProxy: ForwardProxy): HmsSettings = apply(appId, appSecret, Option(forwardProxy)) } @@ -143,11 +141,11 @@ final case class HmsSettings @InternalApi private ( maxConcurrentConnections: Int, forwardProxy: Option[ForwardProxy]) { - def getAppId = appId - def getAppSecret = appSecret - def isTest = test - def getMaxConcurrentConnections = maxConcurrentConnections - def getForwardProxy = forwardProxy + def getAppId: String = appId + def getAppSecret: String = appSecret + def isTest: Boolean = test + def getMaxConcurrentConnections: Int = maxConcurrentConnections + def getForwardProxy: Option[ForwardProxy] = forwardProxy def withAppId(value: String): HmsSettings = copy(appId = value) def withAppSecret(value: String): HmsSettings = copy(appSecret = value) @@ -182,7 +180,7 @@ object ForwardProxy { def apply(host: String, port: Int, credentials: Option[ForwardProxyCredentials], - trustPem: Option[ForwardProxyTrustPem]) = + trustPem: Option[ForwardProxyTrustPem]): ForwardProxy = new ForwardProxy(host, port, credentials, trustPem) /** @@ -191,25 +189,25 @@ object ForwardProxy { def create(host: String, port: Int, credentials: Option[ForwardProxyCredentials], - trustPem: Option[ForwardProxyTrustPem]) = + trustPem: Option[ForwardProxyTrustPem]): ForwardProxy = apply(host, port, credentials, trustPem) - def apply(host: String, port: Int) = + def apply(host: String, port: Int): ForwardProxy = new ForwardProxy(host, port, Option.empty, Option.empty) /** * Java API. */ - def create(host: String, port: Int) = + def create(host: String, port: Int): ForwardProxy = apply(host, port) - def apply(host: String, port: Int, credentials: Option[ForwardProxyCredentials]) = + def apply(host: String, port: Int, credentials: Option[ForwardProxyCredentials]): ForwardProxy = new ForwardProxy(host, port, credentials, Option.empty) /** * Java API. 
*/ - def create(host: String, port: Int, credentials: Option[ForwardProxyCredentials]) = + def create(host: String, port: Int, credentials: Option[ForwardProxyCredentials]): ForwardProxy = apply(host, port, credentials) } @@ -218,15 +216,15 @@ final case class ForwardProxy @InternalApi private (host: String, credentials: Option[ForwardProxyCredentials], trustPem: Option[ForwardProxyTrustPem]) { - def getHost = host - def getPort = port - def getCredentials = credentials - def getForwardProxyTrustPem = trustPem + def getHost: String = host + def getPort: Int = port + def getCredentials: Option[ForwardProxyCredentials] = credentials + def getForwardProxyTrustPem: Option[ForwardProxyTrustPem] = trustPem - def withHost(host: String) = copy(host = host) - def withPort(port: Int) = copy(port = port) - def withCredentials(credentials: ForwardProxyCredentials) = copy(credentials = Option(credentials)) - def withTrustPem(trustPem: ForwardProxyTrustPem) = copy(trustPem = Option(trustPem)) + def withHost(host: String): ForwardProxy = copy(host = host) + def withPort(port: Int): ForwardProxy = copy(port = port) + def withCredentials(credentials: ForwardProxyCredentials): ForwardProxy = copy(credentials = Option(credentials)) + def withTrustPem(trustPem: ForwardProxyTrustPem): ForwardProxy = copy(trustPem = Option(trustPem)) } object ForwardProxyCredentials { @@ -246,8 +244,8 @@ final case class ForwardProxyCredentials @InternalApi private (username: String, def getUsername: String = username def getPassword: String = password - def withUsername(username: String) = copy(username = username) - def withPassword(password: String) = copy(password = password) + def withUsername(username: String): ForwardProxyCredentials = copy(username = username) + def withPassword(password: String): ForwardProxyCredentials = copy(password = password) } object ForwardProxyTrustPem { diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsSession.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsSession.scala index 910370368..2f4fc1fef 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsSession.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsSession.scala @@ -26,7 +26,7 @@ import scala.concurrent.Future */ @InternalApi private class HmsSession(conf: HmsSettings, tokenApi: HmsTokenApi) { - protected var maybeAccessToken: Option[Future[AccessTokenExpiry]] = None + private var maybeAccessToken: Option[Future[AccessTokenExpiry]] = None private def getNewToken()(implicit materializer: Materializer): Future[AccessTokenExpiry] = { val accessToken = tokenApi.getAccessToken(clientId = conf.appId, privateKey = conf.appSecret) @@ -42,11 +42,10 @@ private class HmsSession(conf: HmsSettings, tokenApi: HmsTokenApi) { maybeAccessToken .getOrElse(getNewToken()) .flatMap { result => - if (expiresSoon(result)) { + if (expiresSoon(result)) getNewToken() - } else { + else Future.successful(result) - } } .map(_.accessToken) } diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApi.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApi.scala index d9e7e7bc7..8c658942c 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApi.scala +++ 
b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApi.scala @@ -72,6 +72,6 @@ private[pushkit] class HmsTokenApi(http: => HttpExt, system: ActorSystem, forwar */ @InternalApi private[pushkit] object HmsTokenApi { - case class AccessTokenExpiry(accessToken: String, expiresAt: Long) - case class OAuthResponse(access_token: String, token_type: String, expires_in: Int) + final case class AccessTokenExpiry(accessToken: String, expiresAt: Long) + final case class OAuthResponse(access_token: String, token_type: String, expires_in: Int) } diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala index e2c2273f8..061adecae 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala @@ -25,7 +25,7 @@ import spray.json._ * INTERNAL API */ @InternalApi -private[pushkit] case class PushKitSend(validate_only: Boolean, message: PushKitNotification) +private[pushkit] final case class PushKitSend(validate_only: Boolean, message: PushKitNotification) /** * INTERNAL API @@ -48,7 +48,7 @@ private[pushkit] object PushKitJsonSupport extends DefaultJsonProtocol with Spra implicit object HmsResponseJsonFormat extends RootJsonFormat[PushKitResponse] { def write(c: PushKitResponse): JsValue = c.toJson(this) - def read(value: JsValue) = value match { + def read(value: JsValue): PushKitResponse = value match { case JsObject(fields) if fields.contains("code") && fields.contains("msg") => PushKitResponse( requestId = if (fields.contains("requestId")) fields("requestId").convertTo[String] else null, diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSender.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSender.scala index 92cbfc698..459abd948 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSender.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSender.scala @@ -67,11 +67,10 @@ private[pushkit] class PushKitSender { private def parse(response: Future[HttpResponse])(implicit materializer: Materializer): Future[Response] = { implicit val executionContext: ExecutionContext = materializer.executionContext response.flatMap { rsp => - if (rsp.status.isSuccess) { + if (rsp.status.isSuccess) Unmarshal(rsp.entity).to[PushKitResponse] - } else { + else Unmarshal(rsp.entity).to[ErrorResponse] - } } } } diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/AndroidConfig.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/AndroidConfig.scala index 53731600b..ec9a91858 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/AndroidConfig.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/AndroidConfig.scala @@ -17,7 +17,7 @@ package org.apache.pekko.stream.connectors.huawei.pushkit.models * AndroidConfig model. 
* @see https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/https-send-api-0000001050986197-V5#EN-US_TOPIC_0000001134031085 */ -case class AndroidConfig(collapse_key: Option[Int] = None, +final case class AndroidConfig(collapse_key: Option[Int] = None, ttl: Option[String] = None, bi_tag: Option[String] = None, receipt_id: Option[String] = None, @@ -51,7 +51,7 @@ object AndroidConfig { * * @see https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/https-send-api-0000001050986197-V5#EN-US_TOPIC_0000001134031085 */ -case class AndroidNotification(title: Option[String] = None, +final case class AndroidNotification(title: Option[String] = None, body: Option[String] = None, icon: Option[String] = None, color: Option[String] = None, @@ -132,7 +132,7 @@ object AndroidNotification { /** * LightSettings model. */ -case class LightSettings(color: Option[Color] = None, +final case class LightSettings(color: Option[Color] = None, light_on_duration: Option[String] = None, light_off_duration: Option[String] = None) { def withColor(color: Color): LightSettings = this.copy(color = Option(color)) @@ -151,7 +151,7 @@ object LightSettings { /** * Color model. */ -case class Color(alpha: Option[Float] = None, +final case class Color(alpha: Option[Float] = None, red: Option[Float] = None, green: Option[Float] = None, blue: Option[Float] = None) { @@ -173,7 +173,7 @@ object Color { /** * Click Action model. */ -case class ClickAction(`type`: Option[Int] = None, +final case class ClickAction(`type`: Option[Int] = None, intent: Option[String] = None, url: Option[String] = None, action: Option[String] = None) { @@ -195,7 +195,8 @@ object ClickAction { /** * BadgeNotification model. */ -case class BadgeNotification(add_num: Option[Int] = None, `class`: Option[String] = None, set_num: Option[Int] = None) { +final case class BadgeNotification(add_num: Option[Int] = None, `class`: Option[String] = None, + set_num: Option[Int] = None) { def withAddNum(value: Int): BadgeNotification = this.copy(add_num = Option(value)) def withClass(value: String): BadgeNotification = this.copy(`class` = Option(value)) def withSetNum(value: Int): BadgeNotification = this.copy(set_num = Option(value)) @@ -209,7 +210,7 @@ object BadgeNotification { /** * Button model. */ -case class Button(name: Option[String] = None, +final case class Button(name: Option[String] = None, action_type: Option[Int] = None, intent_type: Option[Int] = None, intent: Option[String] = None, diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/ApnsConfig.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/ApnsConfig.scala index e6618b8f3..f72fccb45 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/ApnsConfig.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/ApnsConfig.scala @@ -17,7 +17,7 @@ package org.apache.pekko.stream.connectors.huawei.pushkit.models * ApnsConfig model. 
* @see https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/https-send-api-0000001050986197-V5#EN-US_TOPIC_0000001134031085 */ -case class ApnsConfig(hms_options: Option[String] = None, +final case class ApnsConfig(hms_options: Option[String] = None, headers: Option[String] = None, payload: Option[String] = None) { def withHmsOptions(value: String): ApnsConfig = this.copy(hms_options = Option(value)) diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/BasicNotification.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/BasicNotification.scala index 49e6dbebf..fcc60a05a 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/BasicNotification.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/BasicNotification.scala @@ -17,7 +17,8 @@ package org.apache.pekko.stream.connectors.huawei.pushkit.models * Notification model. * @see https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/https-send-api-0000001050986197-V5#EN-US_TOPIC_0000001134031085 */ -case class BasicNotification(title: Option[String] = None, body: Option[String] = None, image: Option[String] = None) { +final case class BasicNotification(title: Option[String] = None, body: Option[String] = None, + image: Option[String] = None) { def withTitle(value: String): BasicNotification = this.copy(title = Option(value)) def withBody(value: String): BasicNotification = this.copy(body = Option(value)) def withImage(value: String): BasicNotification = this.copy(image = Option(value)) diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/NotificationTarget.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/NotificationTarget.scala index 89eb140e7..f91b09aa3 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/NotificationTarget.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/NotificationTarget.scala @@ -18,35 +18,35 @@ sealed trait NotificationTarget /** * Tokens model. */ -case class Tokens(token: Seq[String]) extends NotificationTarget +final case class Tokens(token: Seq[String]) extends NotificationTarget /** * Topic model. */ -case class Topic(topic: String) extends NotificationTarget +final case class Topic(topic: String) extends NotificationTarget /** * Condition model. */ -case class Condition(conditionText: String) extends NotificationTarget +final case class Condition(conditionText: String) extends NotificationTarget object Condition { sealed trait ConditionBuilder { - def &&(condition: ConditionBuilder) = And(this, condition) - def ||(condition: ConditionBuilder) = Or(this, condition) - def unary_! = Not(this) + def &&(condition: ConditionBuilder): And = And(this, condition) + def ||(condition: ConditionBuilder): Or = Or(this, condition) + def unary_! 
: Not = Not(this) def toConditionText: String } - case class Topic(topic: String) extends ConditionBuilder { + final case class Topic(topic: String) extends ConditionBuilder { def toConditionText: String = s"'$topic' in topics" } - case class And(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { + final case class And(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { def toConditionText: String = s"(${condition1.toConditionText} && ${condition2.toConditionText})" } - case class Or(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { + final case class Or(condition1: ConditionBuilder, condition2: ConditionBuilder) extends ConditionBuilder { def toConditionText: String = s"(${condition1.toConditionText} || ${condition2.toConditionText})" } - case class Not(condition: ConditionBuilder) extends ConditionBuilder { + final case class Not(condition: ConditionBuilder) extends ConditionBuilder { def toConditionText: String = s"!(${condition.toConditionText})" } diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/PushKitNotification.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/PushKitNotification.scala index e5e6663fa..223ad8c9b 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/PushKitNotification.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/PushKitNotification.scala @@ -17,7 +17,7 @@ package org.apache.pekko.stream.connectors.huawei.pushkit.models * Message model. * @see https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/https-send-api-0000001050986197-V5#EN-US_TOPIC_0000001134031085 */ -case class PushKitNotification(data: Option[String] = None, +final case class PushKitNotification(data: Option[String] = None, notification: Option[BasicNotification] = None, android: Option[AndroidConfig] = None, apns: Option[ApnsConfig] = None, diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/WebConfig.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/WebConfig.scala index 9326db38d..48841651e 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/WebConfig.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/models/WebConfig.scala @@ -17,7 +17,7 @@ package org.apache.pekko.stream.connectors.huawei.pushkit.models * WebPushConfig model. * @see https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/https-send-api-0000001050986197-V5#EN-US_TOPIC_0000001134031085 */ -case class WebConfig(hms_options: Option[String] = None, +final case class WebConfig(hms_options: Option[String] = None, headers: Option[Map[String, String]] = None, notification: Option[WebNotification] = None) { def withHmsOptions(options: String): WebConfig = this.copy(hms_options = Option(options)) @@ -34,7 +34,7 @@ object WebConfig { /** * WebNotification model. */ -case class WebNotification(title: Option[String] = None, +final case class WebNotification(title: Option[String] = None, body: Option[String] = None, icon: Option[String] = None, image: Option[String] = None, @@ -72,7 +72,7 @@ object WebNotification { /** * WebActions model. 
*/ -case class WebActions(action: Option[String] = None, icon: Option[String] = None, title: Option[String] = None) { +final case class WebActions(action: Option[String] = None, icon: Option[String] = None, title: Option[String] = None) { def withAction(value: String): WebActions = this.copy(action = Option(value)) def withIcon(value: String): WebActions = this.copy(icon = Option(value)) def withTitle(value: String): WebActions = this.copy(title = Option(value)) diff --git a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala index 99f46463b..ec35b901e 100644 --- a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala +++ b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala @@ -45,7 +45,7 @@ class HmsTokenApiSpec with BeforeAndAfterAll with LogCapturing { - override def afterAll() = + override def afterAll(): Unit = TestKit.shutdownActorSystem(system) implicit val defaultPatience: PatienceConfig = diff --git a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala index 16a7f78ce..ff8d00a54 100644 --- a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala +++ b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala @@ -47,7 +47,7 @@ class PushKitSenderSpec import PushKitJsonSupport._ - override def afterAll() = + override def afterAll(): Unit = TestKit.shutdownActorSystem(system) implicit val defaultPatience: PatienceConfig = diff --git a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbFlowStage.scala b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbFlowStage.scala index b32eaf112..ad1c4a230 100644 --- a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbFlowStage.scala +++ b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbFlowStage.scala @@ -18,6 +18,7 @@ import pekko.annotation.InternalApi import pekko.stream._ import pekko.stream.connectors.influxdb.{ InfluxDbWriteMessage, InfluxDbWriteResult } import pekko.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } +import pekko.util.unused import org.influxdb.InfluxDB import scala.collection.immutable @@ -36,7 +37,9 @@ private[influxdb] class InfluxDbFlowStage[C]( private val in = Inlet[immutable.Seq[InfluxDbWriteMessage[Point, C]]]("in") private val out = Outlet[immutable.Seq[InfluxDbWriteResult[Point, C]]]("out") - override val shape = FlowShape(in, out) + override val shape + : FlowShape[immutable.Seq[InfluxDbWriteMessage[Point, C]], immutable.Seq[InfluxDbWriteResult[Point, C]]] = + FlowShape(in, out) override protected def initialAttributes: Attributes = super.initialAttributes and Attributes(ActorAttributes.IODispatcher) @@ -51,14 +54,15 @@ private[influxdb] class InfluxDbFlowStage[C]( */ @InternalApi private[influxdb] class InfluxDbMapperFlowStage[T, C]( - clazz: Class[T], + clazz: Class[T] @unused, influxDB: InfluxDB) extends GraphStage[FlowShape[immutable.Seq[InfluxDbWriteMessage[T, C]], immutable.Seq[InfluxDbWriteResult[T, C]]]] { private val in 
= Inlet[immutable.Seq[InfluxDbWriteMessage[T, C]]]("in") private val out = Outlet[immutable.Seq[InfluxDbWriteResult[T, C]]]("out") - override val shape = FlowShape(in, out) + override val shape: FlowShape[immutable.Seq[InfluxDbWriteMessage[T, C]], immutable.Seq[InfluxDbWriteResult[T, C]]] = + FlowShape(in, out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new InfluxDbMapperRecordLogic(influxDB, in, out, shape) @@ -70,7 +74,6 @@ private[influxdb] class InfluxDbMapperFlowStage[T, C]( */ @InternalApi private[influxdb] sealed abstract class InfluxDbLogic[T, C]( - influxDB: InfluxDB, in: Inlet[immutable.Seq[InfluxDbWriteMessage[T, C]]], out: Outlet[immutable.Seq[InfluxDbWriteResult[T, C]]], shape: FlowShape[immutable.Seq[InfluxDbWriteMessage[T, C]], immutable.Seq[InfluxDbWriteResult[T, C]]]) @@ -88,7 +91,7 @@ private[influxdb] sealed abstract class InfluxDbLogic[T, C]( val messages = grab(in) if (messages.nonEmpty) { write(messages) - val writtenMessages = messages.map(m => new InfluxDbWriteResult(m, None)) + val writtenMessages = messages.map(m => InfluxDbWriteResult(m, None)) emit(out, writtenMessages) } @@ -97,7 +100,7 @@ private[influxdb] sealed abstract class InfluxDbLogic[T, C]( protected def toBatchPoints(databaseName: Option[String], retentionPolicy: Option[String], - seq: Seq[InfluxDbWriteMessage[T, C]]) = { + seq: Seq[InfluxDbWriteMessage[T, C]]): BatchPoints = { val builder = BatchPoints.database(databaseName.orNull) @@ -105,7 +108,7 @@ private[influxdb] sealed abstract class InfluxDbLogic[T, C]( @tailrec def convert(messages: Seq[InfluxDbWriteMessage[T, C]]): BatchPoints = - if (messages.size == 0) builder.build() + if (messages.isEmpty) builder.build() else { builder.point(messages.head.point.asInstanceOf[Point]) convert(messages.tail) @@ -125,7 +128,7 @@ private[influxdb] final class InfluxDbRecordLogic[C]( in: Inlet[immutable.Seq[InfluxDbWriteMessage[Point, C]]], out: Outlet[immutable.Seq[InfluxDbWriteResult[Point, C]]], shape: FlowShape[immutable.Seq[InfluxDbWriteMessage[Point, C]], immutable.Seq[InfluxDbWriteResult[Point, C]]]) - extends InfluxDbLogic(influxDB, in, out, shape) { + extends InfluxDbLogic(in, out, shape) { override protected def write(messages: immutable.Seq[InfluxDbWriteMessage[Point, C]]): Unit = messages @@ -143,7 +146,7 @@ private[influxdb] final class InfluxDbMapperRecordLogic[T, C]( in: Inlet[immutable.Seq[InfluxDbWriteMessage[T, C]]], out: Outlet[immutable.Seq[InfluxDbWriteResult[T, C]]], shape: FlowShape[immutable.Seq[InfluxDbWriteMessage[T, C]], immutable.Seq[InfluxDbWriteResult[T, C]]]) - extends InfluxDbLogic(influxDB, in, out, shape) { + extends InfluxDbLogic(in, out, shape) { private val mapperHelper: PekkoConnectorsResultMapperHelper = new PekkoConnectorsResultMapperHelper @@ -153,7 +156,7 @@ private[influxdb] final class InfluxDbMapperRecordLogic[T, C]( .map(convertToBatchPoints) .foreach(influxDB.write) - def groupByDbRp(im: InfluxDbWriteMessage[T, C]) = + private def groupByDbRp(im: InfluxDbWriteMessage[T, C]) = ( im.databaseName match { case dbn: Some[String] => dbn @@ -164,7 +167,7 @@ private[influxdb] final class InfluxDbMapperRecordLogic[T, C]( case None => Some(mapperHelper.retentionPolicy(im.point.getClass)) }) - def convertToBatchPoints(wm: ((Some[String], Some[String]), immutable.Seq[InfluxDbWriteMessage[T, C]])) = + private def convertToBatchPoints(wm: ((Some[String], Some[String]), immutable.Seq[InfluxDbWriteMessage[T, C]])) = toBatchPoints(wm._1._1, wm._1._2, wm._2.map(im => 
im.withPoint(mapperHelper.convertModelToPoint(im.point).asInstanceOf[T]))) diff --git a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbSourceStage.scala b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbSourceStage.scala index 29b4b2618..66d872c74 100644 --- a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbSourceStage.scala +++ b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/InfluxDbSourceStage.scala @@ -33,7 +33,7 @@ private[influxdb] final class InfluxDbSourceStage[T](clazz: Class[T], extends GraphStage[SourceShape[T]] { val out: Outlet[T] = Outlet("InfluxDb.out") - override val shape = SourceShape(out) + override val shape: SourceShape[T] = SourceShape(out) override protected def initialAttributes: Attributes = super.initialAttributes and Attributes(ActorAttributes.IODispatcher) @@ -55,7 +55,7 @@ private[influxdb] final class InfluxDbSourceLogic[T](clazz: Class[T], shape: SourceShape[T]) extends InfluxDbBaseSourceLogic[T](influxDB, query, outlet, shape) { - var resultMapperHelper: PekkoConnectorsResultMapperHelper = _ + private var resultMapperHelper: PekkoConnectorsResultMapperHelper = _ override def preStart(): Unit = { resultMapperHelper = new PekkoConnectorsResultMapperHelper @@ -65,18 +65,16 @@ private[influxdb] final class InfluxDbSourceLogic[T](clazz: Class[T], override def onPull(): Unit = this.dataRetrieved match { case None => completeStage() - case Some(queryResult) => { + case Some(queryResult) => for (result <- queryResult.getResults.asScala) { - if (result.hasError) { + if (result.hasError) failStage(new InfluxDBException(result.getError)) - } else { + else for (series <- result.getSeries.asScala) { emitMultiple(outlet, resultMapperHelper.parseSeriesAs(clazz, series, settings.precision)) } - } } dataRetrieved = None - } } } @@ -89,7 +87,7 @@ private[influxdb] final class InfluxDbRawSourceStage(query: Query, influxDB: Inf extends GraphStage[SourceShape[QueryResult]] { val out: Outlet[QueryResult] = Outlet("InfluxDb.out") - override val shape = SourceShape(out) + override val shape: SourceShape[QueryResult] = SourceShape(out) override protected def initialAttributes: Attributes = super.initialAttributes and Attributes(ActorAttributes.IODispatcher) @@ -112,10 +110,9 @@ private[influxdb] final class InfluxDbSourceRawLogic(query: Query, override def onPull(): Unit = dataRetrieved match { case None => completeStage() - case Some(queryResult) => { + case Some(queryResult) => emit(outlet, queryResult) dataRetrieved = None - } } override protected def validateTotalResults: Boolean = true @@ -134,13 +131,13 @@ private[impl] sealed abstract class InfluxDbBaseSourceLogic[T](influxDB: InfluxD setHandler(outlet, this) - var queryExecuted: Boolean = false + private var queryExecuted: Boolean = false var dataRetrieved: Option[QueryResult] = None override def preStart(): Unit = runQuery() - private def runQuery() = + private def runQuery(): Unit = if (!queryExecuted) { val queryResult = influxDB.query(query) if (!queryResult.hasError) { @@ -155,7 +152,7 @@ private[impl] sealed abstract class InfluxDbBaseSourceLogic[T](influxDB: InfluxD protected def validateTotalResults: Boolean = false - private def failOnError(result: QueryResult) = + private def failOnError(result: QueryResult): Unit = if (validateTotalResults) { val totalErrors = result.getResults.asScala .filter(_.hasError) diff --git 
a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/PekkoConnectorsResultMapperHelper.scala b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/PekkoConnectorsResultMapperHelper.scala index 9bcc17dbb..52155f803 100644 --- a/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/PekkoConnectorsResultMapperHelper.scala +++ b/influxdb/src/main/scala/org/apache/pekko/stream/connectors/influxdb/impl/PekkoConnectorsResultMapperHelper.scala @@ -34,7 +34,7 @@ import org.influxdb.dto.Point @InternalApi private[impl] class PekkoConnectorsResultMapperHelper { - val CLASS_FIELD_CACHE: ConcurrentHashMap[String, ConcurrentMap[String, Field]] = new ConcurrentHashMap(); + private val CLASS_FIELD_CACHE: ConcurrentHashMap[String, ConcurrentMap[String, Field]] = new ConcurrentHashMap() private val FRACTION_MIN_WIDTH = 0 private val FRACTION_MAX_WIDTH = 9 @@ -47,10 +47,10 @@ private[impl] class PekkoConnectorsResultMapperHelper { .toFormatter private[impl] def databaseName(point: Class[_]): String = - point.getAnnotation(classOf[Measurement]).database(); + point.getAnnotation(classOf[Measurement]).database() private[impl] def retentionPolicy(point: Class[_]): String = - point.getAnnotation(classOf[Measurement]).retentionPolicy(); + point.getAnnotation(classOf[Measurement]).retentionPolicy() private[impl] def convertModelToPoint[T](model: T): Point = { throwExceptionIfMissingAnnotation(model.getClass) @@ -59,61 +59,56 @@ private[impl] class PekkoConnectorsResultMapperHelper { val colNameAndFieldMap: ConcurrentMap[String, Field] = CLASS_FIELD_CACHE.get(model.getClass.getName) try { - val modelType = model.getClass(); - val measurement = measurementName(modelType); - val timeUnit: TimeUnit = this.timeUnit(modelType); - val time = timeUnit.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS); - val pointBuilder: Point.Builder = Point.measurement(measurement).time(time, timeUnit); + val modelType = model.getClass + val measurement = measurementName(modelType) + val timeUnit: TimeUnit = this.timeUnit(modelType) + val time = timeUnit.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + val pointBuilder: Point.Builder = Point.measurement(measurement).time(time, timeUnit) for (key <- colNameAndFieldMap.keySet().asScala) { val field = colNameAndFieldMap.get(key) val column = field.getAnnotation(classOf[Column]) val columnName: String = column.name() - val fieldType: Class[_] = field.getType() + val fieldType: Class[_] = field.getType - val isAccessible = field.isAccessible() // deprecated in JDK 11+ - if (!isAccessible) { - field.setAccessible(true); - } + val isAccessible = field.isAccessible // deprecated in JDK 11+ + if (!isAccessible) + field.setAccessible(true) - val value = field.get(model); + val value = field.get(model) - if (column.tag()) { - pointBuilder.tag(columnName, value.toString()); - } else if ("time".equals(columnName)) { - if (value != null) { - setTime(pointBuilder, fieldType, timeUnit, value); - } - } else { - setField(pointBuilder, fieldType, columnName, value); - } + if (column.tag()) + pointBuilder.tag(columnName, value.toString) + else if ("time".equals(columnName)) { + if (value != null) + setTime(pointBuilder, fieldType, timeUnit, value) + } else + setField(pointBuilder, fieldType, columnName, value) } - pointBuilder.build(); + pointBuilder.build() } catch { case e: IllegalArgumentException => throw new InfluxDBMapperException(e); } } - private[impl] def cacheClassFields(clazz: Class[_]) = + private[impl] def 
cacheClassFields(clazz: Class[_]): Unit = if (!CLASS_FIELD_CACHE.containsKey(clazz.getName)) { val initialMap: ConcurrentMap[String, Field] = new ConcurrentHashMap() var influxColumnAndFieldMap = CLASS_FIELD_CACHE.putIfAbsent(clazz.getName, initialMap) - if (influxColumnAndFieldMap == null) { - influxColumnAndFieldMap = initialMap; - } + if (influxColumnAndFieldMap == null) + influxColumnAndFieldMap = initialMap - var c = clazz; + var c = clazz while (c != null) { - for (field <- c.getDeclaredFields()) { - val colAnnotation = field.getAnnotation(classOf[Column]); - if (colAnnotation != null) { - influxColumnAndFieldMap.put(colAnnotation.name(), field); - } + for (field <- c.getDeclaredFields) { + val colAnnotation = field.getAnnotation(classOf[Column]) + if (colAnnotation != null) + influxColumnAndFieldMap.put(colAnnotation.name(), field) } - c = c.getSuperclass(); + c = c.getSuperclass } } @@ -125,7 +120,7 @@ private[impl] class PekkoConnectorsResultMapperHelper { } private def measurementName(point: Class[_]): String = - point.getAnnotation(classOf[Measurement]).name(); + point.getAnnotation(classOf[Measurement]).name() private def timeUnit(point: Class[_]): TimeUnit = point.getAnnotation(classOf[Measurement]).timeUnit() @@ -164,9 +159,8 @@ private[impl] class PekkoConnectorsResultMapperHelper { val obj: T = clazz.getDeclaredConstructor().newInstance() for (i <- 0 until columns.size()) { val correspondingField = fieldMap.get(columns.get(i)) - if (correspondingField != null) { + if (correspondingField != null) setFieldValue(obj, correspondingField, values.get(i), precision) - } } obj } catch { @@ -180,7 +174,7 @@ private[impl] class PekkoConnectorsResultMapperHelper { if (value == null) return val fieldType = field.getType try { - val isAccessible = field.isAccessible() // deprecated in JDK 11+ + val isAccessible = field.isAccessible // deprecated in JDK 11+ if (!isAccessible) field.setAccessible(true) if (fieldValueModified(fieldType, field, obj, value, precision) || fieldValueForPrimitivesModified( fieldType, @@ -191,10 +185,10 @@ private[impl] class PekkoConnectorsResultMapperHelper { s"""Class '${obj.getClass.getName}' field '${field.getName}' is from an unsupported type '${field.getType}'.""" throw new InfluxDBMapperException(msg) } catch { - case e: ClassCastException => + case _: ClassCastException => val msg = s"""Class '${obj.getClass.getName}' field '${field.getName}' was defined with a different field type and caused a ClassCastException. 
- |The correct type is '${value.getClass.getName}' (current field value: '${value}')""".stripMargin + |The correct type is '${value.getClass.getName}' (current field value: '$value')""".stripMargin throw new InfluxDBMapperException(msg) } } @@ -214,9 +208,8 @@ private[impl] class PekkoConnectorsResultMapperHelper { } else if (classOf[Boolean].isAssignableFrom(fieldType)) { field.setBoolean(obj, String.valueOf(value).toBoolean) true - } else { + } else false - } @throws[IllegalArgumentException] @throws[IllegalAccessException] @@ -236,9 +229,8 @@ private[impl] class PekkoConnectorsResultMapperHelper { } else if (classOf[java.lang.Boolean].isAssignableFrom(fieldType)) { field.set(obj, value.asInstanceOf[java.lang.Boolean]) true - } else { + } else false - } @throws[IllegalArgumentException] @throws[IllegalAccessException] @@ -254,19 +246,17 @@ private[impl] class PekkoConnectorsResultMapperHelper { val instant: Instant = getInstant(field, value, precision) field.set(obj, instant) true - } else { + } else false - } private def getInstant(field: Field, value: Any, precision: TimeUnit): Instant = - if (value.isInstanceOf[String]) Instant.from(RFC3339_FORMATTER.parse(String.valueOf(value))) - else if (value.isInstanceOf[java.lang.Long]) Instant.ofEpochMilli(toMillis(value.asInstanceOf[Long], precision)) - else if (value.isInstanceOf[java.lang.Double]) - Instant.ofEpochMilli(toMillis(value.asInstanceOf[java.lang.Double].longValue, precision)) - else if (value.isInstanceOf[java.lang.Integer]) - Instant.ofEpochMilli(toMillis(value.asInstanceOf[Integer].longValue, precision)) - else { - throw new InfluxDBMapperException(s"""Unsupported type ${field.getClass} for field ${field.getName}""") + value match { + case _: String => Instant.from(RFC3339_FORMATTER.parse(String.valueOf(value))) + case _: java.lang.Long => Instant.ofEpochMilli(toMillis(value.asInstanceOf[Long], precision)) + case double: java.lang.Double => Instant.ofEpochMilli(toMillis(double.longValue, precision)) + case integer: Integer => Instant.ofEpochMilli(toMillis(integer.longValue, precision)) + case _ => + throw new InfluxDBMapperException(s"""Unsupported type ${field.getClass} for field ${field.getName}""") } private def toMillis(value: Long, precision: TimeUnit) = TimeUnit.MILLISECONDS.convert(value, precision) diff --git a/influxdb/src/test/java/docs/javadsl/InfluxDbTest.java b/influxdb/src/test/java/docs/javadsl/InfluxDbTest.java index 532c522cf..8eb48aa6c 100644 --- a/influxdb/src/test/java/docs/javadsl/InfluxDbTest.java +++ b/influxdb/src/test/java/docs/javadsl/InfluxDbTest.java @@ -123,10 +123,10 @@ public void testConsumeAndPublishMeasurementsUsingTyped() throws Exception { InfluxDbSource.typed(InfluxDbCpu.class, InfluxDbReadSettings.Default(), influxDB, query) .map( cpu -> { - InfluxDbCpu clonedCpu = cpu.cloneAt(cpu.getTime().plusSeconds(60000l)); + InfluxDbCpu clonedCpu = cpu.cloneAt(cpu.getTime().plusSeconds(60000L)); return InfluxDbWriteMessage.create(clonedCpu, NotUsed.notUsed()); }) - .groupedWithin(10, Duration.of(50l, ChronoUnit.MILLIS)) + .groupedWithin(10, Duration.of(50L, ChronoUnit.MILLIS)) .runWith(InfluxDbSink.typed(InfluxDbCpu.class, influxDB), system); // #run-typed @@ -146,9 +146,9 @@ public void testConsumeAndPublishMeasurements() throws Exception { CompletionStage completionStage = InfluxDbSource.create(influxDB, query) - .map(queryResult -> points(queryResult)) + .map(this::points) .mapConcat(i -> i) - .groupedWithin(10, Duration.of(50l, ChronoUnit.MILLIS)) + .groupedWithin(10, Duration.of(50L, 
ChronoUnit.MILLIS)) .runWith(InfluxDbSink.create(influxDB), system); // #run-query-result @@ -195,7 +195,7 @@ public void typedStreamWithPassThrough() throws Exception { // After we've written them to InfluxDb, we want // to commit the offset to Kafka - /** Just clean the previous data */ + /* Just clean the previous data **/ influxDB.query(new Query("DELETE FROM cpu")); List committedOffsets = new ArrayList<>(); @@ -219,10 +219,8 @@ public void typedStreamWithPassThrough() throws Exception { Source.from(messageFromKafka) .map( - kafkaMessage -> { - return InfluxDbWriteMessage.create( - kafkaMessage.influxDbCpu, kafkaMessage.kafkaOffset); - }) + kafkaMessage -> InfluxDbWriteMessage.create( + kafkaMessage.influxDbCpu, kafkaMessage.kafkaOffset)) .groupedWithin(10, Duration.ofMillis(10)) .via(InfluxDbFlow.typedWithPassThrough(InfluxDbCpu.class, influxDB)) .map( @@ -248,7 +246,7 @@ public void typedStreamWithPassThrough() throws Exception { InfluxDbReadSettings.Default(), influxDB, new Query("SELECT*FROM cpu")) - .map(m -> m.getHostname()) + .map(Cpu::getHostname) .runWith(Sink.seq(), system) .toCompletableFuture() .get(10, TimeUnit.SECONDS); diff --git a/influxdb/src/test/java/docs/javadsl/TestUtils.java b/influxdb/src/test/java/docs/javadsl/TestUtils.java index d4a1d2d10..4a6990b66 100644 --- a/influxdb/src/test/java/docs/javadsl/TestUtils.java +++ b/influxdb/src/test/java/docs/javadsl/TestUtils.java @@ -48,10 +48,10 @@ public static void populateDatabase(InfluxDB influxDB, Class clazz) throws Ex Instant.class, String.class, String.class, Double.class, Boolean.class, Long.class); Object firstCore = cons.newInstance( - Instant.now().minusSeconds(1000), "local_1", "eu-west-2", 1.4d, true, 123l); + Instant.now().minusSeconds(1000), "local_1", "eu-west-2", 1.4d, true, 123L); influxDBMapper.save(firstCore); Object secondCore = - cons.newInstance(Instant.now().minusSeconds(500), "local_2", "eu-west-2", 1.4d, true, 123l); + cons.newInstance(Instant.now().minusSeconds(500), "local_2", "eu-west-2", 1.4d, true, 123L); influxDBMapper.save(secondCore); } diff --git a/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala b/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala index 246443dba..d31f0d4d8 100644 --- a/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala @@ -85,8 +85,8 @@ class FlowSpec // After we've written them to InfluxDB, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(cpu: InfluxDbFlowCpu, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(cpu: InfluxDbFlowCpu, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(new InfluxDbFlowCpu(Instant.now().minusSeconds(1000), "local_1", "eu-west-2", 1.4d, true, 123L), diff --git a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala index 35c86547d..6dd3d141f 100644 --- a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala @@ -52,7 +52,7 @@ class InfluxDbSourceSpec override def beforeEach(): Unit = populateDatabase(influxDB, classOf[InfluxDbSourceCpu]) - override def afterEach() = + override def afterEach(): Unit = cleanDatabase(influxDB, DatabaseName) "support source" in assertAllStagesStopped { @@ -62,7 +62,7 @@ class InfluxDbSourceSpec val influxDBResult = InfluxDbSource(influxDB, query).runWith(Sink.seq) val 
resultToAssert = influxDBResult.futureValue.head - val values = resultToAssert.getResults.get(0).getSeries().get(0).getValues + val values = resultToAssert.getResults.get(0).getSeries.get(0).getValues values.size() mustBe 2 } @@ -86,7 +86,7 @@ class InfluxDbSourceSpec val influxDBResult = InfluxDbSource(influxDB, query).runWith(Sink.seq) val resultToAssert = influxDBResult.futureValue.head - val valuesFetched = resultToAssert.getResults.get(0).getSeries().get(0).getValues + val valuesFetched = resultToAssert.getResults.get(0).getSeries.get(0).getValues valuesFetched.size() mustBe 2 val error = resultToAssert.getResults.get(1).getError diff --git a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala index 4de4d3817..abe1decb5 100644 --- a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala @@ -50,9 +50,9 @@ class InfluxDbSpec // #define-class override protected def beforeAll(): Unit = { // #init-client - influxDB = InfluxDBFactory.connect(INFLUXDB_URL, USERNAME, PASSWORD); - influxDB.setDatabase(DatabaseName); - influxDB.query(new Query("CREATE DATABASE " + DatabaseName, DatabaseName)); + influxDB = InfluxDBFactory.connect(INFLUXDB_URL, USERNAME, PASSWORD) + influxDB.setDatabase(DatabaseName) + influxDB.query(new Query("CREATE DATABASE " + DatabaseName, DatabaseName)) // #init-client } @@ -62,11 +62,11 @@ class InfluxDbSpec override def beforeEach(): Unit = populateDatabase(influxDB, classOf[InfluxDbSpecCpu]) - override def afterEach() = + override def afterEach(): Unit = cleanDatabase(influxDB, DatabaseName) "support typed source" in assertAllStagesStopped { - val query = new Query("SELECT * FROM cpu", DatabaseName); + val query = new Query("SELECT * FROM cpu", DatabaseName) val measurements = InfluxDbSource.typed(classOf[InfluxDbSpecCpu], InfluxDbReadSettings(), influxDB, query).runWith(Sink.seq) @@ -76,7 +76,7 @@ class InfluxDbSpec "InfluxDbFlow" should { "consume and publish measurements using typed" in assertAllStagesStopped { - val query = new Query("SELECT * FROM cpu", DatabaseName); + val query = new Query("SELECT * FROM cpu", DatabaseName) // #run-typed val f1 = InfluxDbSource @@ -100,7 +100,7 @@ class InfluxDbSpec "consume and publish measurements" in assertAllStagesStopped { // #run-query-result - val query = new Query("SELECT * FROM cpu", DatabaseName); + val query = new Query("SELECT * FROM cpu", DatabaseName) val f1 = InfluxDbSource(influxDB, query) .map(resultToPoints) diff --git a/ironmq/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes b/ironmq/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes new file mode 100644 index 000000000..4e1d5a5c8 --- /dev/null +++ b/ironmq/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes @@ -0,0 +1,4 @@ +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.ironmq.Message") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.ironmq.Message$Ids") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.ironmq.PushMessage") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.ironmq.javadsl.CommittablePushMessage") diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSettings.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSettings.scala index 652645f34..05e7b0822 100644 --- 
a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSettings.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSettings.scala @@ -53,7 +53,7 @@ final class IronMqSettings private ( token = token, consumerSettings = consumerSettings) - override def toString = + override def toString: String = "IronMqSettings(" + s"endpoint=$endpoint," + s"projectId=$projectId," + @@ -130,7 +130,7 @@ object IronMqSettings { pollTimeout = pollTimeout, reservationTimeout = reservationTimeout) - override def toString = + override def toString: String = "ConsumerSettings(" + s"bufferMinSize=$bufferMinSize," + s"bufferMaxSize=$bufferMaxSize," + diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/domain.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/domain.scala index f52ae6f55..b332f980d 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/domain.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/domain.scala @@ -16,7 +16,7 @@ package org.apache.pekko.stream.connectors.ironmq import scala.concurrent.duration.{ Duration, FiniteDuration } import org.apache.pekko.util.JavaDurationConverters._ -case class PushMessage(body: String, delay: FiniteDuration = Duration.Zero) +final case class PushMessage(body: String, delay: FiniteDuration = Duration.Zero) object PushMessage { @@ -33,13 +33,13 @@ object PushMessage { * @param body The pushed message content. * @param noOfReservations It is the count of how many time the message has been reserved (and released or expired) previously */ -case class Message(messageId: Message.Id, body: String, noOfReservations: Int) +final case class Message(messageId: Message.Id, body: String, noOfReservations: Int) object Message { - case class Id(value: String) extends AnyVal { + final case class Id(value: String) extends AnyVal { override def toString: String = value } - case class Ids(ids: List[Id]) + final case class Ids(ids: List[Id]) } diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/Codec.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/Codec.scala index 63f8eadea..f7d6e8600 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/Codec.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/Codec.scala @@ -24,11 +24,11 @@ import io.circe.{ Decoder, Encoder, Json } * * @param name The name associated with this Queue. */ -private[ironmq] case class Queue(name: Queue.Name) +private[ironmq] final case class Queue(name: Queue.Name) private[ironmq] object Queue { - case class Name(value: String) extends AnyVal { + final case class Name(value: String) extends AnyVal { override def toString: String = value } } diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqClient.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqClient.scala index f6143a2fc..c735a17db 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqClient.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqClient.scala @@ -119,7 +119,7 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto * Create a new queue, with default parameters, with the given name. 
*/ def createQueue(name: String)(implicit ec: ExecutionContext): Future[String] = - makeRequest(Put(Uri(s"$queuesPath/${name}"), Json.obj())) + makeRequest(Put(Uri(s"$queuesPath/$name"), Json.obj())) .flatMap(Unmarshal(_).to[Json]) .map(_.hcursor.downField("queue").as[Queue]) .collect { @@ -130,7 +130,7 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto * Delete the queue with the given name. */ def deleteQueue(name: String)(implicit ec: ExecutionContext): Future[Done] = - makeRequest(Delete(Uri(s"$queuesPath/${name}"))).map(_ => Done) + makeRequest(Delete(Uri(s"$queuesPath/$name"))).map(_ => Done) /** * Produce the given messages to the queue with the given name. Return the ids ot the produced messages. @@ -143,7 +143,7 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto Json.obj("body" -> Json.fromString(pm.body), "delay" -> Json.fromLong(pm.delay.toSeconds)) })) - makeRequest(Post(Uri(s"$queuesPath/${queueName}/messages"), payload)).flatMap(Unmarshal(_).to[Message.Ids]) + makeRequest(Post(Uri(s"$queuesPath/$queueName/messages"), payload)).flatMap(Unmarshal(_).to[Message.Ids]) } /** @@ -163,18 +163,17 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto timeout: Duration = Duration.Undefined, watch: Duration = Duration.Undefined)(implicit ec: ExecutionContext): Future[Iterable[ReservedMessage]] = { - val payload = (if (timeout.isFinite) { + val payload = (if (timeout.isFinite) Json.obj("timeout" -> Json.fromLong(timeout.toSeconds)) - } else { - Json.Null - }).deepMerge(if (watch.isFinite) { + else + Json.Null).deepMerge(if (watch.isFinite) { Json.obj("wait" -> Json.fromLong(watch.toSeconds)) } else { Json.Null }).deepMerge(Json.obj("n" -> Json.fromInt(noOfMessages), "delete" -> Json.fromBoolean(false))) - makeRequest(Post(Uri(s"$queuesPath/${queueName}/reservations"), payload)) + makeRequest(Post(Uri(s"$queuesPath/$queueName/reservations"), payload)) .flatMap(Unmarshal(_).to[Json]) .map { json => json.hcursor.downField("messages").as[Iterable[ReservedMessage]] @@ -196,13 +195,13 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto def pullMessages(queueName: String, noOfMessages: Int = 1, watch: Duration = Duration.Undefined)( implicit ec: ExecutionContext): Future[Iterable[Message]] = { - val payload = (if (watch.isFinite) { + val payload = (if (watch.isFinite) Json.obj("wait" -> Json.fromLong(watch.toSeconds)) - } else { - Json.Null - }).deepMerge(Json.obj("n" -> Json.fromInt(noOfMessages), "delete" -> Json.fromBoolean(true))) + else + Json.Null).deepMerge(Json.obj("n" -> Json.fromInt(noOfMessages), + "delete" -> Json.fromBoolean(true))) - makeRequest(Post(Uri(s"$queuesPath/${queueName}/reservations"), payload)) + makeRequest(Post(Uri(s"$queuesPath/$queueName/reservations"), payload)) .flatMap(Unmarshal(_).to[Json]) .map { json => json.hcursor.downField("messages").as[Iterable[Message]] @@ -228,7 +227,7 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto Json.Null }).deepMerge(Json.obj("reservation_id" -> reservation.reservationId.asJson)) - makeRequest(Post(s"$queuesPath/${queueName}/messages/${reservation.messageId}/touch", payload)) + makeRequest(Post(s"$queuesPath/$queueName/messages/${reservation.messageId}/touch", payload)) .flatMap(Unmarshal(_).to[Json]) .map { json => for { @@ -254,7 +253,7 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto def peekMessages(queueName: String, 
numberOfMessages: Int = 1)(implicit ec: ExecutionContext): Future[Iterable[Message]] = makeRequest( - Get(Uri(s"$queuesPath/${queueName}/messages").withQuery(Uri.Query("n" -> numberOfMessages.toString)))).flatMap( + Get(Uri(s"$queuesPath/$queueName/messages").withQuery(Uri.Query("n" -> numberOfMessages.toString)))).flatMap( Unmarshal(_).to[Json]) .map { json => json.hcursor.downField("messages").as[Iterable[Message]] @@ -273,7 +272,7 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto val payload = Json.obj("ids" -> Json.fromValues(reservations.map(_.asJson))) - makeRequest(Delete(Uri(s"$queuesPath/${queueName}/messages"), payload)).map(_ => ()) + makeRequest(Delete(Uri(s"$queuesPath/$queueName/messages"), payload)).map(_ => ()) } @@ -289,7 +288,7 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto val payload = Json.obj("reservation_id" -> reservation.reservationId.asJson, "delay" -> delay.toSeconds.asJson) - makeRequest(Post(Uri(s"$queuesPath/${queueName}/messages/${reservation.messageId.value}/release"), payload)) + makeRequest(Post(Uri(s"$queuesPath/$queueName/messages/${reservation.messageId.value}/release"), payload)) .map(_ => ()) } @@ -301,7 +300,7 @@ private[ironmq] final class IronMqClient(settings: IronMqSettings)(implicit acto * @param queueName The queue to be purged. */ def clearMessages(queueName: String)(implicit ec: ExecutionContext): Future[Unit] = - makeRequest(Delete(Uri(s"$queuesPath/${queueName}/messages"), Json.obj())).map(_ => ()) + makeRequest(Delete(Uri(s"$queuesPath/$queueName/messages"), Json.obj())).map(_ => ()) private val queuesPath = s"/3/projects/${settings.projectId}/queues" diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala index 2209d120d..a9b3e864e 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala @@ -78,9 +78,8 @@ private[ironmq] final class IronMqPullStage(queueName: String, settings: IronMqS new OutHandler { override def onPull(): Unit = { - if (!isTimerActive(FetchMessagesTimerKey)) { + if (!isTimerActive(FetchMessagesTimerKey)) scheduleAtFixedRate(FetchMessagesTimerKey, fetchInterval, fetchInterval) - } deliveryMessages() } }) diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPushStage.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPushStage.scala index 06543b6ba..c45548e06 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPushStage.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPushStage.scala @@ -90,13 +90,12 @@ private[ironmq] class IronMqPushStage(queueName: String, settings: IronMqSetting tryPull(in) }) - private def checkForCompletion() = - if (isClosed(in) && runningFutures <= 0) { + private def checkForCompletion(): Unit = + if (isClosed(in) && runningFutures <= 0) exceptionFromUpstream match { case None => completeStage() case Some(ex) => failStage(ex) } - } private val futureCompleted = getAsyncCallback[Unit] { _ => runningFutures = runningFutures - 1 diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/ReservedMessage.scala 
b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/ReservedMessage.scala index abdcc0149..ae40533c2 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/ReservedMessage.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/ReservedMessage.scala @@ -26,7 +26,7 @@ import pekko.stream.connectors.ironmq.Message * @param message The fetched message. */ @InternalApi -private[ironmq] case class ReservedMessage(reservationId: Reservation.Id, message: Message) { +private[ironmq] final case class ReservedMessage(reservationId: Reservation.Id, message: Message) { val messageId: Message.Id = message.messageId val messageBody: String = message.body val reservation: Reservation = Reservation(messageId, reservationId) @@ -42,14 +42,14 @@ private[ironmq] case class ReservedMessage(reservationId: Reservation.Id, messag * @param reservationId The reservation id */ @InternalApi -private[ironmq] case class Reservation(messageId: Message.Id, reservationId: Reservation.Id) +private[ironmq] final case class Reservation(messageId: Message.Id, reservationId: Reservation.Id) /** * Internal API. */ @InternalApi private[ironmq] object Reservation { - case class Id(value: String) extends AnyVal { + final case class Id(value: String) extends AnyVal { override def toString: String = value } } diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/IronMqProducer.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/IronMqProducer.scala index 7c977977a..22cd6622c 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/IronMqProducer.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/IronMqProducer.scala @@ -87,7 +87,7 @@ object IronMqProducer { } -case class CommittablePushMessage[ToCommit](message: PushMessage, toCommit: ToCommit) +final case class CommittablePushMessage[ToCommit](message: PushMessage, toCommit: ToCommit) object CommittablePushMessage { diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/package.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/package.scala index 44a8dba71..467b99e84 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/package.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/package.scala @@ -40,9 +40,7 @@ package object javadsl { } private[javadsl] implicit class RichScalaCommittable(cm: ScalaCommittable) { - def asJava: Committable = new Committable { - override def commit(): CompletionStage[Done] = cm.commit().asJava - } + def asJava: Committable = () => cm.commit().asJava } private[javadsl] implicit class RichCommittableMessage(cm: CommittableMessage) { @@ -53,9 +51,7 @@ package object javadsl { } private[javadsl] implicit class RichCommittable(cm: Committable) { - def asScala: ScalaCommittable = new ScalaCommittable { - override def commit(): Future[Done] = cm.commit().asScala - } + def asScala: ScalaCommittable = () => cm.commit().asScala } } diff --git a/jms/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes b/jms/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes new file mode 100644 index 000000000..7cb15cb89 --- /dev/null +++ b/jms/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes @@ -0,0 +1,12 @@ 
+ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.AckEnvelope") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.ConnectionRetryException") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.JmsConnectTimedOut") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.NullMapMessageEntry") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.NullMessageProperty") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.TxEnvelope") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.UnsupportedMapMessageEntryType") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.UnsupportedMessagePropertyType") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.UnsupportedMessageType") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.scaladsl.JmsConnectorState$Connecting") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.scaladsl.JmsConnectorState$Failed") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.jms.scaladsl.JmsConnectorState$Failing") diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Credentials.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Credentials.scala index 41957a485..a2a8c86eb 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Credentials.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Credentials.scala @@ -28,7 +28,7 @@ final class Credentials private ( username = username, password = password) - override def toString = + override def toString: String = "Credentials(" + s"username=$username," + s"password=${"*" * password.length}" + diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Envelopes.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Envelopes.scala index 6758e6b21..74f213661 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Envelopes.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Envelopes.scala @@ -20,14 +20,14 @@ import javax.jms import scala.concurrent.{ Future, Promise } -case class AckEnvelope private[jms] (message: jms.Message, private val jmsSession: JmsAckSession) { +final case class AckEnvelope private[jms] (message: jms.Message, private val jmsSession: JmsAckSession) { - val processed = new AtomicBoolean(false) + val processed: AtomicBoolean = new AtomicBoolean(false) def acknowledge(): Unit = if (processed.compareAndSet(false, true)) jmsSession.ack(message) } -case class TxEnvelope private[jms] (message: jms.Message, private val jmsSession: JmsSession) { +final case class TxEnvelope private[jms] (message: jms.Message, private val jmsSession: JmsSession) { private[this] val commitPromise = Promise[() => Unit]() diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Headers.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Headers.scala index a92baa4b7..1cd7d9ca1 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Headers.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Headers.scala @@ -34,7 +34,7 @@ object JmsCorrelationId { /** * Java API: create [[JmsCorrelationId]] */ - def create(correlationId: String) = JmsCorrelationId(correlationId) + def create(correlationId: 
String): JmsCorrelationId = JmsCorrelationId(correlationId) } final case class JmsReplyTo(jmsDestination: Destination) extends JmsHeader { @@ -46,12 +46,12 @@ object JmsReplyTo { /** * Reply to a queue with given name. */ - def queue(name: String) = JmsReplyTo(Queue(name)) + def queue(name: String): JmsReplyTo = JmsReplyTo(Queue(name)) /** * Reply to a topic with given name. */ - def topic(name: String) = JmsReplyTo(Topic(name)) + def topic(name: String): JmsReplyTo = JmsReplyTo(Topic(name)) } final case class JmsType(jmsType: String) extends JmsHeader { @@ -63,7 +63,7 @@ object JmsType { /** * Java API: create [[JmsType]] */ - def create(jmsType: String) = JmsType(jmsType) + def create(jmsType: String): JmsType = JmsType(jmsType) } final case class JmsTimeToLive(timeInMillis: Long) extends JmsHeader { @@ -80,7 +80,7 @@ object JmsTimeToLive { /** * Java API: create [[JmsTimeToLive]] */ - def create(timeToLive: Long, unit: TimeUnit) = JmsTimeToLive(unit.toMillis(timeToLive)) + def create(timeToLive: Long, unit: TimeUnit): JmsTimeToLive = JmsTimeToLive(unit.toMillis(timeToLive)) } /** @@ -95,7 +95,7 @@ object JmsPriority { /** * Java API: create [[JmsPriority]] */ - def create(priority: Int) = JmsPriority(priority) + def create(priority: Int): JmsPriority = JmsPriority(priority) } /** @@ -110,7 +110,7 @@ object JmsDeliveryMode { /** * Java API: create [[JmsDeliveryMode]] */ - def create(deliveryMode: Int) = JmsDeliveryMode(deliveryMode) + def create(deliveryMode: Int): JmsDeliveryMode = JmsDeliveryMode(deliveryMode) } final case class JmsMessageId(jmsMessageId: String) extends JmsHeader { @@ -122,7 +122,7 @@ object JmsMessageId { /** * Java API: create [[JmsMessageId]] */ - def create(messageId: String) = JmsMessageId(messageId) + def create(messageId: String): JmsMessageId = JmsMessageId(messageId) } final case class JmsTimestamp(jmsTimestamp: Long) extends JmsHeader { @@ -134,7 +134,7 @@ object JmsTimestamp { /** * Java API: create [[JmsTimestamp]] */ - def create(timestamp: Long) = JmsTimestamp(timestamp) + def create(timestamp: Long): JmsTimestamp = JmsTimestamp(timestamp) } final case class JmsRedelivered(jmsRedelivered: Boolean) extends JmsHeader { @@ -146,7 +146,7 @@ object JmsRedelivered { /** * Java API: create [[JmsRedelivered]] */ - def create(redelivered: Boolean) = JmsRedelivered(redelivered) + def create(redelivered: Boolean): JmsRedelivered = JmsRedelivered(redelivered) } final case class JmsExpiration(jmsExpiration: Long) extends JmsHeader { @@ -158,5 +158,5 @@ object JmsExpiration { /** * Java API: create [[JmsExpiration]] */ - def create(expiration: Long) = JmsExpiration(expiration) + def create(expiration: Long): JmsExpiration = JmsExpiration(expiration) } diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsBrowseSettings.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsBrowseSettings.scala index 648fe1ef9..2b1858cf5 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsBrowseSettings.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsBrowseSettings.scala @@ -68,7 +68,7 @@ final class JmsBrowseSettings private ( selector = selector, acknowledgeMode = acknowledgeMode) - override def toString = + override def toString: String = "JmsBrowseSettings(" + s"connectionFactory=$connectionFactory," + s"connectionRetrySettings=$connectionRetrySettings," + diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsConsumerSettings.scala 
b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsConsumerSettings.scala index 5e92ce9f2..cf2013ba3 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsConsumerSettings.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsConsumerSettings.scala @@ -140,7 +140,7 @@ final class JmsConsumerSettings private ( failStreamOnAckTimeout = failStreamOnAckTimeout, connectionStatusSubscriptionTimeout = connectionStatusSubscriptionTimeout) - override def toString = + override def toString: String = "JmsConsumerSettings(" + s"connectionFactory=$connectionFactory," + s"connectionRetrySettings=$connectionRetrySettings," + diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsExceptions.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsExceptions.scala index fb5a2cb69..aa2c64fe1 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsExceptions.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsExceptions.scala @@ -24,36 +24,37 @@ import scala.util.control.NoStackTrace */ trait NonRetriableJmsException extends Exception -case class UnsupportedMessagePropertyType(propertyName: String, propertyValue: Any, message: JmsEnvelope[_]) +final case class UnsupportedMessagePropertyType(propertyName: String, propertyValue: Any, message: JmsEnvelope[_]) extends Exception( s"Jms property '$propertyName' has unknown type '${propertyValue.getClass.getName}'. " + "Only primitive types and String are supported as property values.") with NonRetriableJmsException @deprecated("Not used anywhere", "Alpakka 3.0.4") -case class NullMessageProperty(propertyName: String, message: JmsEnvelope[_]) +final case class NullMessageProperty(propertyName: String, message: JmsEnvelope[_]) extends Exception( s"null value was given for Jms property '$propertyName'.") with NonRetriableJmsException -case class UnsupportedMapMessageEntryType(entryName: String, entryValue: Any, message: JmsMapMessagePassThrough[_]) +final case class UnsupportedMapMessageEntryType(entryName: String, entryValue: Any, + message: JmsMapMessagePassThrough[_]) extends Exception( s"Jms MapMessage entry '$entryName' has unknown type '${entryValue.getClass.getName}'. 
" + "Only primitive types, String, and Byte array are supported as entry values.") with NonRetriableJmsException @deprecated("Not used anywhere", "Alpakka 3.0.4") -case class NullMapMessageEntry(entryName: String, message: JmsMapMessagePassThrough[_]) +final case class NullMapMessageEntry(entryName: String, message: JmsMapMessagePassThrough[_]) extends Exception( s"null value was given for Jms MapMessage entry '$entryName'.") with NonRetriableJmsException -case class UnsupportedMessageType(message: jms.Message) +final case class UnsupportedMessageType(message: jms.Message) extends Exception( s"Can't convert a ${message.getClass.getName} to a JmsMessage") with NonRetriableJmsException -case class ConnectionRetryException(message: String, cause: Throwable) extends Exception(message, cause) +final case class ConnectionRetryException(message: String, cause: Throwable) extends Exception(message, cause) case object RetrySkippedOnMissingConnection extends Exception("JmsProducer is not connected, send attempt skipped") @@ -61,7 +62,7 @@ case object RetrySkippedOnMissingConnection case object JmsNotConnected extends Exception("JmsConnector is not connected") with NoStackTrace -case class JmsConnectTimedOut(message: String) extends TimeoutException(message) +final case class JmsConnectTimedOut(message: String) extends TimeoutException(message) final class JmsTxAckTimeout(ackTimeout: Duration) extends TimeoutException(s"The TxEnvelope didn't get committed or rolled back within ack-timeout ($ackTimeout)") diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsMessages.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsMessages.scala index f7b85c742..aa6470b22 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsMessages.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsMessages.scala @@ -292,7 +292,7 @@ object JmsByteMessage { /** * create a byte message */ - def apply(bytes: Array[Byte]) = new JmsByteMessage(bytes = bytes) + def apply(bytes: Array[Byte]): JmsByteMessage = new JmsByteMessage(bytes = bytes) /** * Java API: create a byte message with pass-through @@ -466,7 +466,7 @@ object JmsByteStringMessage { /** * Create a byte message from a ByteString */ - def apply(byteString: ByteString) = new JmsByteStringMessage(byteString) + def apply(byteString: ByteString): JmsByteStringMessage = new JmsByteStringMessage(byteString) /** * Java API: Create a byte message from a ByteString with a pass-through attached @@ -478,7 +478,7 @@ object JmsByteStringMessage { /** * Java API: Create a byte message from a ByteString */ - def create(byteString: ByteString) = apply(byteString) + def create(byteString: ByteString): JmsByteStringMessage = apply(byteString) /** * Create a byte message from a [[javax.jms.BytesMessage]] with pass-through diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsProducerSettings.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsProducerSettings.scala index eaa3ce36a..86b89d01e 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsProducerSettings.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsProducerSettings.scala @@ -103,7 +103,7 @@ final class JmsProducerSettings private ( timeToLive = timeToLive, connectionStatusSubscriptionTimeout = connectionStatusSubscriptionTimeout) - override def toString = + override def toString: String = "JmsProducerSettings(" + s"connectionFactory=$connectionFactory," + 
s"connectionRetrySettings=$connectionRetrySettings," + diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/InternalConnectionState.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/InternalConnectionState.scala index 560cf6953..b77311484 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/InternalConnectionState.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/InternalConnectionState.scala @@ -33,12 +33,12 @@ private[jms] trait InternalConnectionState @InternalApi private[jms] object InternalConnectionState { case object JmsConnectorDisconnected extends InternalConnectionState - case class JmsConnectorInitializing(connection: Future[jms.Connection], + final case class JmsConnectorInitializing(connection: Future[jms.Connection], attempt: Int, backoffMaxed: Boolean, sessions: Int) extends InternalConnectionState - case class JmsConnectorConnected(connection: jms.Connection) extends InternalConnectionState - case class JmsConnectorStopping(completion: Try[Done]) extends InternalConnectionState - case class JmsConnectorStopped(completion: Try[Done]) extends InternalConnectionState + final case class JmsConnectorConnected(connection: jms.Connection) extends InternalConnectionState + final case class JmsConnectorStopping(completion: Try[Done]) extends InternalConnectionState + final case class JmsConnectorStopped(completion: Try[Done]) extends InternalConnectionState } diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsAckSourceStage.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsAckSourceStage.scala index 16c2a0da0..05b581546 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsAckSourceStage.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsAckSourceStage.scala @@ -68,9 +68,8 @@ private[jms] final class JmsAckSourceStage(settings: JmsConsumerSettings, destin try { handleMessage.invoke(AckEnvelope(message, session)) session.pendingAck += 1 - if (session.maxPendingAcksReached) { + if (session.maxPendingAcksReached) session.ackBackpressure() - } session.drainAcks() } catch { case e: jms.JMSException => diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsBrowseStage.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsBrowseStage.scala index 45a0078d6..662afa131 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsBrowseStage.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsBrowseStage.scala @@ -18,7 +18,9 @@ import pekko.annotation.InternalApi import pekko.stream.connectors.jms.{ Destination, JmsBrowseSettings } import pekko.stream.stage.{ GraphStage, GraphStageLogic, OutHandler } import pekko.stream.{ ActorAttributes, Attributes, Outlet, SourceShape } + import javax.jms +import javax.jms.Message /** * Internal API. 
@@ -27,7 +29,7 @@ import javax.jms private[jms] final class JmsBrowseStage(settings: JmsBrowseSettings, queue: Destination) extends GraphStage[SourceShape[jms.Message]] { private val out = Outlet[jms.Message]("JmsBrowseStage.out") - val shape = SourceShape(out) + val shape: SourceShape[Message] = SourceShape(out) override protected def initialAttributes: Attributes = super.initialAttributes and Attributes.name("JmsBrowse") and ActorAttributes.IODispatcher @@ -68,10 +70,9 @@ private[jms] final class JmsBrowseStage(settings: JmsBrowseSettings, queue: Dest } def onPull(): Unit = - if (messages.hasMoreElements) { + if (messages.hasMoreElements) push(out, messages.nextElement()) - } else { + else complete(out) - } } } diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala index 3ddd31f0c..1273ba325 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala @@ -133,7 +133,7 @@ private[jms] trait JmsConnector[S <: JmsSession] { destination.name) publishAndFailStage(ex) - case _: jms.JMSException | _: JmsConnectTimedOut => handleRetriableException(ex) + case _: jms.JMSException | _: JmsConnectTimedOut => handleRetryableException(ex) case _ => connectionState match { @@ -144,7 +144,7 @@ private[jms] trait JmsConnector[S <: JmsSession] { } } - private def handleRetriableException(ex: Throwable): Unit = { + private def handleRetryableException(ex: Throwable): Unit = { closeSessions() connectionState match { case JmsConnectorInitializing(_, attempt, backoffMaxed, _) => @@ -152,7 +152,7 @@ private[jms] trait JmsConnector[S <: JmsSession] { case JmsConnectorConnected(_) | JmsConnectorDisconnected => maybeReconnect(ex, 0, backoffMaxed = false) case _: JmsConnectorStopping | _: JmsConnectorStopped => logStoppingException(ex) - case other => + case _ => log.warning("received [{}] in connectionState={}", ex, connectionState) } } @@ -172,15 +172,14 @@ private[jms] trait JmsConnector[S <: JmsSession] { case Success(_) => connectionState match { case init @ JmsConnectorInitializing(c, _, _, sessions) => - if (sessions + 1 == jmsSettings.sessionCount) { + if (sessions + 1 == jmsSettings.sessionCount) c.foreach { c => updateState(JmsConnectorConnected(c)) log.info("{} connected", attributes.nameLifted.mkString) } - } else { + else updateState(init.copy(sessions = sessions + 1)) - } - case s => () + case _ => () } case Failure(ex: jms.JMSException) => @@ -261,7 +260,7 @@ private[jms] trait JmsConnector[S <: JmsSession] { allSessions.foreach(_.foreach(onSession.invoke)) } - protected def closeConnection(connection: jms.Connection): Unit = { + private def closeConnection(connection: jms.Connection): Unit = { try { // deregister exception listener to clear reference from JMS client to the Pekko stage connection.setExceptionListener(null) @@ -294,14 +293,13 @@ private[jms] trait JmsConnector[S <: JmsSession] { closing } - private def closeSession(s: S): Unit = { + private def closeSession(s: S): Unit = try { cancelAckTimers(s) s.closeSession() } catch { case e: Throwable => log.error(e, "Error closing jms session") } - } protected def abortSessionsAsync(): Future[Unit] = { val aborting = Future @@ -345,11 +343,9 @@ private[jms] trait JmsConnector[S <: JmsSession] { val jmsConnection = openConnectionAttempt(startConnection) updateState(JmsConnectorInitializing(jmsConnection, 
attempt, backoffMaxed, 0)) jmsConnection.map { connection => - connection.setExceptionListener(new jms.ExceptionListener { - override def onException(ex: jms.JMSException): Unit = { - closeConnection(connection) - connectionFailedCB.invoke(ex) - } + connection.setExceptionListener((ex: jms.JMSException) => { + closeConnection(connection) + connectionFailedCB.invoke(ex) }) connection } @@ -411,11 +407,11 @@ object JmsConnector { sealed trait ConnectionAttemptStatus case object Connecting extends ConnectionAttemptStatus case object Connected extends ConnectionAttemptStatus - case object TimedOut extends ConnectionAttemptStatus + private case object TimedOut extends ConnectionAttemptStatus - final case class AttemptConnect(attempt: Int, backoffMaxed: Boolean) + private final case class AttemptConnect(attempt: Int, backoffMaxed: Boolean) final case class FlushAcknowledgementsTimerKey(jmsSession: JmsAckSession) - case object ConnectionStatusTimeout + private case object ConnectionStatusTimeout def connection: InternalConnectionState => Future[jms.Connection] = { case InternalConnectionState.JmsConnectorInitializing(c, _, _, _) => c diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConsumerStage.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConsumerStage.scala index 211995b7c..06164a4b2 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConsumerStage.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConsumerStage.scala @@ -64,11 +64,9 @@ private[jms] final class JmsConsumerStage(settings: JmsConsumerSettings, destina jmsSession .createConsumer(settings.selector) .map { consumer => - consumer.setMessageListener(new jms.MessageListener { - def onMessage(message: jms.Message): Unit = { - backpressure.acquire() - handleMessage.invoke(message) - } + consumer.setMessageListener((message: jms.Message) => { + backpressure.acquire() + handleMessage.invoke(message) }) } .onComplete(sessionOpenedCB.invoke) diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsMessageProducer.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsMessageProducer.scala index f427de436..66102d999 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsMessageProducer.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsMessageProducer.scala @@ -79,7 +79,7 @@ private class JmsMessageProducer(jmsProducer: jms.MessageProducer, jmsSession: J case objectMessage: JmsObjectMessagePassThrough[_] => jmsSession.session.createObjectMessage(objectMessage.serializable) - case pt: JmsPassThrough[_] => throw new IllegalArgumentException("can't create message for JmsPassThrough") + case _: JmsPassThrough[_] => throw new IllegalArgumentException("can't create message for JmsPassThrough") } @@ -137,9 +137,8 @@ private class JmsMessageProducer(jmsProducer: jms.MessageProducer, jmsSession: J private[impl] object JmsMessageProducer { def apply(jmsSession: JmsProducerSession, settings: JmsProducerSettings, epoch: Int): JmsMessageProducer = { val producer = jmsSession.session.createProducer(null) - if (settings.timeToLive.nonEmpty) { + if (settings.timeToLive.nonEmpty) producer.setTimeToLive(settings.timeToLive.get.toMillis) - } new JmsMessageProducer(producer, jmsSession, epoch) } } diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsProducerStage.scala 
b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsProducerStage.scala index b0bcb5daa..e03cea1e2 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsProducerStage.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsProducerStage.scala @@ -143,9 +143,8 @@ private[jms] final class JmsProducerStage[E <: JmsEnvelope[PassThrough], PassThr new InHandler { override def onUpstreamFinish(): Unit = if (inFlightMessages.isEmpty) publishAndCompleteStage() - override def onUpstreamFailure(ex: Throwable): Unit = { + override def onUpstreamFailure(ex: Throwable): Unit = publishAndFailStage(ex) - } override def onPush(): Unit = { val elem: E = grab(in) @@ -188,9 +187,8 @@ private[jms] final class JmsProducerStage[E <: JmsEnvelope[PassThrough], PassThr Future(jmsProducer.send(envelope)).andThen { case tried => sendCompletedCB.invoke((send, tried, jmsProducer)) } - } else { + } else nextTryOrFail(send, RetrySkippedOnMissingConnection) - } } def nextTryOrFail(send: SendAttempt[E], ex: Throwable): Unit = { @@ -235,13 +233,13 @@ private[jms] final class JmsProducerStage[E <: JmsEnvelope[PassThrough], PassThr tryPull(in) private def pushNextIfPossible(): Unit = - if (inFlightMessages.isEmpty) { + if (inFlightMessages.isEmpty) // no messages in flight, are we about to complete? if (isClosed(in)) publishAndCompleteStage() else pullIfNeeded() - } else if (inFlightMessages.peek().elem eq NotYetThere) { + else if (inFlightMessages.peek().elem eq NotYetThere) // next message to be produced is still not there, we need to wait. pullIfNeeded() - } else if (isAvailable(out)) { + else if (isAvailable(out)) { val holder = inFlightMessages.dequeue() holder.elem match { case Success(elem) => @@ -266,14 +264,14 @@ private[jms] final class JmsProducerStage[E <: JmsEnvelope[PassThrough], PassThr @InternalApi private[jms] object JmsProducerStage { - val NotYetThere = Failure(new Exception with NoStackTrace) + private val NotYetThere = Failure(new Exception with NoStackTrace) /* * NOTE: the following code is heavily inspired by org.apache.pekko.stream.impl.fusing.MapAsync * * To get a condensed view of what the Holder is about, have a look there too. 
*/ - class Holder[A](var elem: Try[A]) extends (Try[A] => Unit) { + private final class Holder[A](var elem: Try[A]) extends (Try[A] => Unit) { // To support both fail-fast when the supervision directive is Stop // and not calling the decider multiple times, we need to cache the decider result and re-use that @@ -292,7 +290,7 @@ private[jms] object JmsProducerStage { override def apply(t: Try[A]): Unit = elem = t } - case class SendAttempt[E <: JmsEnvelope[_]](envelope: E, + private final case class SendAttempt[E <: JmsEnvelope[_]](envelope: E, holder: Holder[E], attempt: Int = 0, backoffMaxed: Boolean = false) diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsTxSourceStage.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsTxSourceStage.scala index f6db22736..aa2c84db3 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsTxSourceStage.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsTxSourceStage.scala @@ -43,7 +43,8 @@ private[jms] final class JmsTxSourceStage(settings: JmsConsumerSettings, destina private final class JmsTxSourceStageLogic(inheritedAttributes: Attributes) extends SourceStageLogic[TxEnvelope](shape, out, settings, destination, inheritedAttributes) { - protected def createSession(connection: jms.Connection, createDestination: jms.Session => javax.jms.Destination) = { + protected def createSession( + connection: jms.Connection, createDestination: jms.Session => javax.jms.Destination): JmsConsumerSession = { val session = connection.createSession(true, settings.acknowledgeMode.getOrElse(AcknowledgeMode.SessionTransacted).mode) new JmsConsumerSession(connection, session, createDestination(session), graphStageDestination) @@ -57,33 +58,29 @@ private[jms] final class JmsTxSourceStage(settings: JmsConsumerSettings, destina session .createConsumer(settings.selector) .map { consumer => - consumer.setMessageListener(new jms.MessageListener { - - def onMessage(message: jms.Message): Unit = + consumer.setMessageListener((message: jms.Message) => + try { + val envelope = TxEnvelope(message, session) + handleMessage.invoke(envelope) try { - val envelope = TxEnvelope(message, session) - handleMessage.invoke(envelope) - try { - // JMS spec defines that commit/rollback must be done on the same thread. - // While some JMS implementations work without this constraint, IBM MQ is - // very strict about the spec and throws exceptions when called from a different thread. - val action = Await.result(envelope.commitFuture, settings.ackTimeout) - action() - } catch { - case _: TimeoutException => - val exception = new JmsTxAckTimeout(settings.ackTimeout) - session.session.rollback() - if (settings.failStreamOnAckTimeout) { - handleError.invoke(exception) - } else { - log.warning(exception.getMessage) - } - } + // JMS spec defines that commit/rollback must be done on the same thread. + // While some JMS implementations work without this constraint, IBM MQ is + // very strict about the spec and throws exceptions when called from a different thread. + val action = Await.result(envelope.commitFuture, settings.ackTimeout) + action() } catch { - case e: IllegalArgumentException => handleError.invoke(e) // Invalid envelope. Fail the stage. 
- case e: jms.JMSException => handleError.invoke(e) + case _: TimeoutException => + val exception = new JmsTxAckTimeout(settings.ackTimeout) + session.session.rollback() + if (settings.failStreamOnAckTimeout) + handleError.invoke(exception) + else + log.warning(exception.getMessage) } - }) + } catch { + case e: IllegalArgumentException => handleError.invoke(e) // Invalid envelope. Fail the stage. + case e: jms.JMSException => handleError.invoke(e) + }) } .onComplete(sessionOpenedCB.invoke) diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/Sessions.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/Sessions.scala index b5267d24d..3c5bf2f19 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/Sessions.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/Sessions.scala @@ -104,16 +104,15 @@ private[jms] final class JmsAckSession(override val connection: jms.Connection, override def abortSession(): Unit = stopMessageListenerAndCloseSession() - private def stopMessageListenerAndCloseSession(): Unit = { + private def stopMessageListenerAndCloseSession(): Unit = try { drainAcks() } finally { ackQueue.put(Left(SessionClosed)) session.close() } - } - def ackBackpressure() = { + def ackBackpressure(): Unit = { ackQueue.take() match { case Left(SessionClosed) => listenerRunning = false diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/SourceStageLogic.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/SourceStageLogic.scala index ac8266280..12ae295a5 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/SourceStageLogic.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/SourceStageLogic.scala @@ -18,7 +18,7 @@ import pekko.annotation.InternalApi import pekko.stream.connectors.jms.impl.InternalConnectionState.JmsConnectorStopping import pekko.stream.connectors.jms.{ Destination, JmsConsumerSettings } import pekko.stream.scaladsl.Source -import pekko.stream.stage.{ OutHandler, StageLogging, TimerGraphStageLogic } +import pekko.stream.stage.{ AsyncCallback, OutHandler, StageLogging, TimerGraphStageLogic } import pekko.stream.{ Attributes, Materializer, Outlet, SourceShape } import pekko.{ Done, NotUsed } @@ -84,7 +84,7 @@ private abstract class SourceStageLogic[T](shape: SourceShape[T], failStage(ex) } - protected val handleError = getAsyncCallback[Throwable] { e => + protected val handleError: AsyncCallback[Throwable] = getAsyncCallback[Throwable] { e => updateState(JmsConnectorStopping(Failure(e))) failStage(e) } @@ -96,16 +96,15 @@ private abstract class SourceStageLogic[T](shape: SourceShape[T], } private[jms] val handleMessage = getAsyncCallback[T] { msg => - if (isAvailable(out)) { - if (queue.isEmpty) { + if (isAvailable(out)) + if (queue.isEmpty) pushMessage(msg) - } else { + else { pushMessage(queue.dequeue()) queue.enqueue(msg) } - } else { + else queue.enqueue(msg) - } } protected def pushMessage(msg: T): Unit diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsConnectorState.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsConnectorState.scala index c494a26fc..95c204395 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsConnectorState.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsConnectorState.scala @@ -57,10 +57,10 @@ sealed trait JmsConnectorState { object JmsConnectorState { case 
object Disconnected extends JmsConnectorState - case class Connecting(attempt: Int) extends JmsConnectorState + final case class Connecting(attempt: Int) extends JmsConnectorState case object Connected extends JmsConnectorState case object Completing extends JmsConnectorState case object Completed extends JmsConnectorState - case class Failing(exception: Throwable) extends JmsConnectorState - case class Failed(exception: Throwable) extends JmsConnectorState + final case class Failing(exception: Throwable) extends JmsConnectorState + final case class Failed(exception: Throwable) extends JmsConnectorState } diff --git a/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala index a895c6318..40f77be75 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala @@ -560,9 +560,8 @@ class JmsConnectorsSpec extends JmsSpec { // connection should be either // - not yet initialized before broker stop, or // - closed on broker stop (if preStart came first). - if (connectionFactory.cachedConnection != null) { + if (connectionFactory.cachedConnection != null) connectionFactory.cachedConnection shouldBe Symbol("closed") - } } "ensure no message loss when stopping a stream" in withConnectionFactory() { connectionFactory => diff --git a/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala index 4c61c2481..784353b97 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala @@ -125,9 +125,8 @@ class JmsTxConnectorsSpec extends JmsSharedServerSpec { if (id % 2 == 0 && !rolledBackSet.contains(id)) { rolledBackSet.add(id) env.rollback() - } else { + } else env.commit() - } env.message.asInstanceOf[TextMessage].getText } .runWith(Sink.seq) @@ -465,10 +464,7 @@ class JmsTxConnectorsSpec extends JmsSharedServerSpec { val r = new java.util.Random - val thisDecider: Supervision.Decider = { - case ex => - Supervision.resume - } + val thisDecider: Supervision.Decider = _ => Supervision.resume val (killSwitch, streamDone) = jmsSource .throttle(10, 1.second, 2, ThrottleMode.shaping) @@ -558,10 +554,9 @@ class JmsTxConnectorsSpec extends JmsSharedServerSpec { .toMat( Sink.foreach { env => val text = env.message.asInstanceOf[TextMessage].getText - if (r.nextInt(3) <= 1) { + if (r.nextInt(3) <= 1) // Artificially timing out this message Thread.sleep(20) - } resultQueue.add(text) env.commit() })(Keep.both) @@ -637,10 +632,9 @@ class JmsTxConnectorsSpec extends JmsSharedServerSpec { .throttle(10, 1.second, 2, ThrottleMode.shaping) .toMat( Sink.foreach { env => - if (r.nextInt(3) <= 1) { + if (r.nextInt(3) <= 1) // Artificially timing out this message Thread.sleep(20) - } env.commit() })(Keep.both) .run() diff --git a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsProducerRetrySpec.scala b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsProducerRetrySpec.scala index 1aa0fb1b8..1bbd87050 100644 --- a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsProducerRetrySpec.scala +++ b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsProducerRetrySpec.scala @@ -32,7 +32,7 @@ import scala.concurrent.duration._ class JmsProducerRetrySpec extends JmsSpec { override implicit val patienceConfig: PatienceConfig = PatienceConfig(20.seconds) - val stoppingDecider: Supervision.Decider = ex => Supervision.Stop + val 
stoppingDecider: Supervision.Decider = _ => Supervision.Stop "JmsProducer retries" should { "retry sending on network failures" in withServer() { server => @@ -114,7 +114,7 @@ class JmsProducerRetrySpec extends JmsSpec { .withMaxRetries(3))) .withAttributes(ActorAttributes.supervisionStrategy(stoppingDecider)) - val (cancellable, result) = Source + val (_, result) = Source .tick(50.millis, 50.millis, "") .zipWithIndex .map(e => JmsMapMessage(Map("time" -> System.currentTimeMillis(), "index" -> e._2))) @@ -164,7 +164,7 @@ class JmsProducerRetrySpec extends JmsSpec { "invoke supervisor when send fails" in withConnectionFactory() { connectionFactory => val wrappedConnectionFactory = new WrappedConnectionFactory(connectionFactory) val deciderCalls = new AtomicInteger() - val decider: Supervision.Decider = { ex => + val decider: Supervision.Decider = { _ => deciderCalls.incrementAndGet() Supervision.Resume } diff --git a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSharedServerSpec.scala b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSharedServerSpec.scala index 2d32fce5e..6bc4d2787 100644 --- a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSharedServerSpec.scala +++ b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSharedServerSpec.scala @@ -33,16 +33,14 @@ abstract class JmsSharedServerSpec extends JmsSpec { override protected def afterAll(): Unit = { super.afterAll() - if (jmsBroker != null && jmsBroker.isStarted) { + if (jmsBroker != null && jmsBroker.isStarted) jmsBroker.stop() - } } protected def isQueueEmpty(queueName: String): Boolean = jmsBroker.service.checkQueueSize(queueName) - override def withConnectionFactory()(test: ConnectionFactory => Unit): Unit = { + override def withConnectionFactory()(test: ConnectionFactory => Unit): Unit = test(connectionFactory) - } def createName(prefix: String) = prefix + Random.nextInt().toString diff --git a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala index 0ae12ec68..4fed75a73 100644 --- a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala +++ b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala @@ -58,9 +58,8 @@ abstract class JmsSpec test(jmsBroker) Thread.sleep(500) } finally { - if (jmsBroker.isStarted) { + if (jmsBroker.isStarted) jmsBroker.stop() - } } } diff --git a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/impl/SoftReferenceCacheSpec.scala b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/impl/SoftReferenceCacheSpec.scala index bafed017f..d68e7eec3 100644 --- a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/impl/SoftReferenceCacheSpec.scala +++ b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/impl/SoftReferenceCacheSpec.scala @@ -87,7 +87,7 @@ class SoftReferenceCacheSpec extends AnyWordSpec with Matchers { val ref = new AtomicReference(Option(new State())) ref.get.get.cache.lookup(0L, "0") - // dequeue/enqueue simulates memory visibility guarantees of Akka's async callbacks + // dequeue/enqueue simulates memory visibility guarantees of Pekko's async callbacks def dequeue(): Option[State] = { val seen = ref.get seen.filter(_ => ref.compareAndSet(seen, None)) @@ -138,9 +138,8 @@ class SoftReferenceCacheSpec extends AnyWordSpec with Matchers { info(s"Executed ${ref.get.get.counter} cache lookups") // verify - if (failed.get()) { + if (failed.get()) fail("Synchronization was broken") - } } } } 
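The SoftReferenceCacheSpec hunk above keeps the existing AtomicReference hand-off: one thread claims exclusive ownership of the shared State with compareAndSet and publishes it back with set, which is what stands in for the memory-visibility guarantees the real stage gets from async callbacks. The standalone sketch below only illustrates that hand-off shape; the State class and counter field are placeholders, not code from this patch.

    import java.util.concurrent.atomic.AtomicReference

    object HandOffSketch {
      final class State { var counter: Long = 0L }

      private val ref = new AtomicReference[Option[State]](Some(new State))

      // Claim exclusive ownership of the state, or None if another thread already holds it.
      def dequeue(): Option[State] = {
        val seen = ref.get()
        seen.filter(_ => ref.compareAndSet(seen, None))
      }

      // Hand the state back; the CAS/set pair provides the happens-before edge between owners.
      def enqueue(state: State): Unit = ref.set(Some(state))

      def main(args: Array[String]): Unit = {
        dequeue().foreach { s => s.counter += 1; enqueue(s) }
        println(ref.get().map(_.counter)) // prints Some(1)
      }
    }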
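A recurring cleanup in this patch is replacing anonymous classes that implement single-abstract-method interfaces with plain lambdas, relying on the SAM conversion available since Scala 2.12: the MessageListener in JmsTxSourceStage above, and the ShardRecordProcessorFactory and Mockito Answer instances in the kinesis changes below, are all rewritten this way. A minimal sketch of the before/after shape, using a hypothetical Listener trait rather than the real JMS or Mockito types:

    object SamConversionSketch {
      // Stand-in for a single-abstract-method interface such as javax.jms.MessageListener.
      trait Listener {
        def onMessage(message: String): Unit
      }

      private def register(listener: Listener): Unit = listener.onMessage("hello")

      def main(args: Array[String]): Unit = {
        // Before: explicit anonymous class.
        register(new Listener {
          override def onMessage(message: String): Unit = println(s"anon: $message")
        })
        // After: the function literal is SAM-converted to Listener by the compiler.
        register((message: String) => println(s"lambda: $message"))
      }
    }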
diff --git a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/CachedConnectionFactory.scala b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/CachedConnectionFactory.scala index 4a98489aa..3c8f2c842 100644 --- a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/CachedConnectionFactory.scala +++ b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/CachedConnectionFactory.scala @@ -20,12 +20,11 @@ import javax.jms.{ Connection, ConnectionFactory } */ class CachedConnectionFactory(connFactory: ConnectionFactory) extends ConnectionFactory { - var cachedConnection: Connection = null + var cachedConnection: Connection = _ override def createConnection(): Connection = { - if (cachedConnection == null) { + if (cachedConnection == null) cachedConnection = connFactory.createConnection() - } cachedConnection } diff --git a/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala b/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala index 8f50d51d2..8b79b0ef9 100644 --- a/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala +++ b/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala @@ -32,7 +32,7 @@ private[pekko] final class JsonStreamReader(path: JsonPath) extends GraphStage[F private val in = Inlet[ByteString]("Json.in") private val out = Outlet[ByteString]("Json.out") - override val shape = FlowShape(in, out) + override val shape: FlowShape[ByteString, ByteString] = FlowShape(in, out) override def initialAttributes: Attributes = Attributes.name(s"jsonReader($path)") @@ -48,10 +48,9 @@ private[pekko] final class JsonStreamReader(path: JsonPath) extends GraphStage[F private val config = surfer.configBuilder .bind(path, new JsonPathListener { - override def onValue(value: Any, context: ParsingContext): Unit = { + override def onValue(value: Any, context: ParsingContext): Unit = // see https://github.com/lampepfl/dotty/issues/17946 buffer = buffer.enqueue[ByteString](ByteString(value.toString)) - } }) .build private val parser = surfer.createNonBlockingParser(config) @@ -73,11 +72,10 @@ private[pekko] final class JsonStreamReader(path: JsonPath) extends GraphStage[F if (buffer.nonEmpty) { emitMultiple(out, buffer) buffer = Queue.empty[ByteString] - } else { + } else // Iff the buffer is empty, we haven't consumed any values yet // and thus we still need to fulfill downstream need. 
tryPull(in) - } } override def onUpstreamFinish(): Unit = diff --git a/kinesis/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes b/kinesis/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes new file mode 100644 index 000000000..aa210838e --- /dev/null +++ b/kinesis/src/main/mima-filters/1.1.x.backwards.excludes/final-class.backwards.excludes @@ -0,0 +1,5 @@ +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.kinesis.KinesisErrors$FailurePublishingRecords") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.kinesis.ShardIterator$AfterSequenceNumber") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.kinesis.ShardIterator$AtSequenceNumber") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.kinesis.ShardIterator$AtTimestamp") +ProblemFilters.exclude[FinalClassProblem]("org.apache.pekko.stream.connectors.kinesisfirehose.KinesisFirehoseErrors$FailurePublishingRecords") diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/CommittableRecord.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/CommittableRecord.scala index e99165767..312fe7a81 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/CommittableRecord.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/CommittableRecord.scala @@ -58,12 +58,11 @@ abstract class CommittableRecord @InternalApi private[kinesis] ( * See [[software.amazon.kinesis.processor.RecordProcessorCheckpointer]] */ def tryToCheckpoint(): Unit = - if (canBeCheckpointed) { + if (canBeCheckpointed) try forceCheckpoint() catch { case _: ShutdownException => () } - } /** * Basic checkpoint method, the caller should decide if it's safe diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/KinesisErrors.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/KinesisErrors.scala index 8b2de3f19..526b6f444 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/KinesisErrors.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/KinesisErrors.scala @@ -28,7 +28,7 @@ object KinesisErrors { with KinesisSourceError sealed trait KinesisFlowErrors extends NoStackTrace - case class FailurePublishingRecords(e: Throwable) + final case class FailurePublishingRecords(e: Throwable) extends RuntimeException(s"Failure publishing records to Kinesis. Reason : ${e.getMessage}", e) with KinesisFlowErrors } diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSettings.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSettings.scala index 3926f3cf5..8f985aac3 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSettings.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSettings.scala @@ -20,7 +20,7 @@ final class KinesisFlowSettings private (val parallelism: Int, require( maxBatchSize >= 1 && maxBatchSize <= 500, - "Limit must be between 1 and 500. See: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html") + "Limit must be between 1 and 500. 
See: https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html") require(maxRecordsPerSecond >= 1) require(maxBytesPerSecond >= 1) @@ -39,7 +39,7 @@ final class KinesisFlowSettings private (val parallelism: Int, maxRecordsPerSecond = maxRecordsPerSecond, maxBytesPerSecond = maxBytesPerSecond) - override def toString = + override def toString: String = "KinesisFlowSettings(" + s"parallelism=$parallelism," + s"maxBatchSize=$maxBatchSize," + diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala index 1716bfd87..3a064eada 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala @@ -40,7 +40,7 @@ object ShardIterator { override final val shardIteratorType: ShardIteratorType = ShardIteratorType.TRIM_HORIZON } - case class AtTimestamp private[kinesis] (value: Instant) extends ShardIterator { + final case class AtTimestamp private[kinesis] (value: Instant) extends ShardIterator { override final val timestamp: Option[Instant] = Some(value) override final val startingSequenceNumber: Option[String] = None @@ -48,7 +48,7 @@ object ShardIterator { override final val shardIteratorType: ShardIteratorType = ShardIteratorType.AT_TIMESTAMP } - case class AtSequenceNumber(sequenceNumber: String) extends ShardIterator { + final case class AtSequenceNumber(sequenceNumber: String) extends ShardIterator { override final val timestamp: Option[Instant] = None override final val startingSequenceNumber: Option[String] = Some(sequenceNumber) @@ -56,7 +56,7 @@ object ShardIterator { override final val shardIteratorType: ShardIteratorType = ShardIteratorType.AT_SEQUENCE_NUMBER } - case class AfterSequenceNumber(sequenceNumber: String) extends ShardIterator { + final case class AfterSequenceNumber(sequenceNumber: String) extends ShardIterator { override final val timestamp: Option[Instant] = None override final val startingSequenceNumber: Option[String] = Some(sequenceNumber) diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardSettings.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardSettings.scala index 5e028575c..33e7bc2ac 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardSettings.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardSettings.scala @@ -28,7 +28,7 @@ final class ShardSettings private ( val limit: Int) { require( limit >= 1 && limit <= 10000, - "Limit must be between 0 and 10000. See: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html") + "Limit must be between 0 and 10000. 
See: https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html") shardIteratorType match { case ShardIteratorType.AFTER_SEQUENCE_NUMBER | ShardIteratorType.AT_SEQUENCE_NUMBER => require( @@ -71,7 +71,7 @@ final class ShardSettings private ( refreshInterval = refreshInterval, limit = limit) - override def toString = + override def toString: String = "ShardSettings(" + s"streamName=$streamName," + s"shardId=$shardId," + diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSchedulerSourceStage.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSchedulerSourceStage.scala index 527cebf93..592afef03 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSchedulerSourceStage.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSchedulerSourceStage.scala @@ -22,7 +22,7 @@ import pekko.stream.connectors.kinesis.{ CommittableRecord, KinesisSchedulerSour import pekko.stream.stage._ import pekko.stream.{ ActorAttributes, Attributes, Outlet, SourceShape } import software.amazon.kinesis.coordinator.Scheduler -import software.amazon.kinesis.processor.{ ShardRecordProcessor, ShardRecordProcessorFactory } +import software.amazon.kinesis.processor.ShardRecordProcessorFactory import scala.annotation.tailrec import scala.collection.mutable @@ -37,8 +37,8 @@ private[kinesis] object KinesisSchedulerSourceStage { sealed trait Command final case class NewRecord(cr: CommittableRecord) extends Command - final case object Pump extends Command - final case object Complete extends Command + case object Pump extends Command + case object Complete extends Command final case class SchedulerShutdown(result: Try[_]) extends Command } @@ -64,7 +64,8 @@ private[kinesis] class KinesisSchedulerSourceStage( new Logic(matValue) -> matValue.future } - final class Logic(matValue: Promise[Scheduler]) extends GraphStageLogic(shape) with StageLogging with OutHandler { + private final class Logic(matValue: Promise[Scheduler]) extends GraphStageLogic(shape) with StageLogging + with OutHandler { setHandler(out, this) import KinesisSchedulerSourceStage._ @@ -78,10 +79,7 @@ private[kinesis] class KinesisSchedulerSourceStage( override def preStart(): Unit = { implicit val ec: ExecutionContext = executionContext(attributes) - val scheduler = schedulerBuilder(new ShardRecordProcessorFactory { - override def shardRecordProcessor(): ShardRecordProcessor = - new ShardProcessor(newRecordCallback) - }) + val scheduler = schedulerBuilder(() => new ShardProcessor(newRecordCallback)) schedulerOpt = Some(scheduler) Future(scheduler.run()).onComplete(result => callback.invoke(SchedulerShutdown(result))) matValue.success(scheduler) @@ -115,7 +113,7 @@ private[kinesis] class KinesisSchedulerSourceStage( schedulerOpt.foreach(scheduler => if (!scheduler.shutdownComplete()) scheduler.shutdown()) - protected def executionContext(attributes: Attributes): ExecutionContext = { + private def executionContext(attributes: Attributes): ExecutionContext = { val dispatcherId = (attributes.get[ActorAttributes.Dispatcher](ActorAttributes.IODispatcher) match { case ActorAttributes.Dispatcher("") => ActorAttributes.IODispatcher diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSourceStage.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSourceStage.scala index fd06bb01d..516dc3cf1 100644 --- 
a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSourceStage.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSourceStage.scala @@ -43,9 +43,9 @@ private[kinesis] object KinesisSourceStage { private[kinesis] final case class GetRecordsFailure(ex: Throwable) - private[kinesis] final case object Pump + private[kinesis] case object Pump - private[kinesis] final case object GetRecords + private[kinesis] case object GetRecords } @@ -102,16 +102,14 @@ private[kinesis] class KinesisSourceStage(shardSettings: ShardSettings, amazonKi if (result.nextShardIterator == null) { log.info("Shard {} returned a null iterator and will now complete.", shardId) completeStage() - } else { + } else currentShardIterator = result.nextShardIterator - } if (records.nonEmpty) { records.foreach(buffer.enqueue(_)) self.become(ready) self.ref ! Pump - } else { + } else scheduleOnce(GetRecords, refreshInterval) - } case (_, GetRecordsFailure(ex)) => val error = new Errors.GetRecordsError(shardId, ex) diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/ShardProcessor.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/ShardProcessor.scala index bc2b3d9d9..e118a6abb 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/ShardProcessor.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/ShardProcessor.scala @@ -64,9 +64,8 @@ private[kinesis] class ShardProcessor( processRecordsInput.isAtShardEnd, processRecordsInput.millisBehindLatest) - if (batchData.isAtShardEnd) { + if (batchData.isAtShardEnd) lastRecordSemaphore.acquire() - } val numberOfRecords = processRecordsInput.records().size() processRecordsInput.records().asScala.zipWithIndex.foreach { @@ -100,7 +99,7 @@ private[kinesis] class ShardProcessor( shutdown = Some(ShutdownReason.REQUESTED) } - final class InternalCommittableRecord(record: KinesisClientRecord, batchData: BatchData, lastRecord: Boolean) + private final class InternalCommittableRecord(record: KinesisClientRecord, batchData: BatchData, lastRecord: Boolean) extends CommittableRecord(record, batchData, shardData) { private def checkpoint(): Unit = { checkpointer.checkpoint(sequenceNumber, subSequenceNumber) diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseErrors.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseErrors.scala index 81e601e68..eddcfd899 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseErrors.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseErrors.scala @@ -22,6 +22,6 @@ object KinesisFirehoseErrors { case object GetRecordsError extends KinesisSourceError sealed trait KinesisFlowErrors extends NoStackTrace - case class FailurePublishingRecords(e: Throwable) extends RuntimeException(e) with KinesisFlowErrors + final case class FailurePublishingRecords(e: Throwable) extends RuntimeException(e) with KinesisFlowErrors } diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseFlowSettings.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseFlowSettings.scala index 504677e99..2d49eddbb 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseFlowSettings.scala +++ 
b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseFlowSettings.scala @@ -39,7 +39,7 @@ final class KinesisFirehoseFlowSettings private (val parallelism: Int, maxRecordsPerSecond = maxRecordsPerSecond, maxBytesPerSecond = maxBytesPerSecond) - override def toString = + override def toString: String = "KinesisFirehoseFlowSettings(" + s"parallelism=$parallelism," + s"maxBatchSize=$maxBatchSize," + diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala index cbf3e4f5e..d635ceceb 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala @@ -44,7 +44,7 @@ object KinesisFirehoseFlow { .records(records.asJavaCollection) .build()) .asScala - .transform(identity, FailurePublishingRecords(_))(parasitic)) + .transform(identity, FailurePublishingRecords)(parasitic)) .mapConcat(_.requestResponses.asScala.toIndexedSeq) private def getByteSize(record: Record): Int = record.data.asByteBuffer.position diff --git a/kinesis/src/test/scala/docs/scaladsl/KinesisFirehoseSnippets.scala b/kinesis/src/test/scala/docs/scaladsl/KinesisFirehoseSnippets.scala index acf0851d1..050ef6495 100644 --- a/kinesis/src/test/scala/docs/scaladsl/KinesisFirehoseSnippets.scala +++ b/kinesis/src/test/scala/docs/scaladsl/KinesisFirehoseSnippets.scala @@ -64,9 +64,8 @@ object KinesisFirehoseSnippets { // #error-handling val flowWithErrors: Flow[Record, PutRecordBatchResponseEntry, NotUsed] = KinesisFirehoseFlow("streamName") .map { response => - if (response.errorCode() != null) { + if (response.errorCode() != null) throw new RuntimeException(response.errorCode()) - } response } // #error-handling diff --git a/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala b/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala index 33c037586..9a606eec5 100644 --- a/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala +++ b/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala @@ -106,9 +106,8 @@ object KinesisSnippets { // #error-handling val flowWithErrors: Flow[PutRecordsRequestEntry, PutRecordsResultEntry, NotUsed] = KinesisFlow("myStreamName") .map { response => - if (response.errorCode() ne null) { + if (response.errorCode() ne null) throw new RuntimeException(response.errorCode()) - } response } diff --git a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSpec.scala b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSpec.scala index d124840a4..81cf0a25d 100644 --- a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSpec.scala +++ b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSpec.scala @@ -27,7 +27,6 @@ import pekko.util.ByteString import org.mockito.ArgumentMatchers.any import org.mockito.Mockito.when import org.mockito.invocation.InvocationOnMock -import org.mockito.stubbing.Answer import org.scalatest.wordspec.AnyWordSpec import org.scalatest.matchers.should.Matchers import software.amazon.awssdk.core.SdkBytes @@ -138,17 +137,15 @@ class KinesisFlowSpec extends AnyWordSpec with Matchers with KinesisMock with Lo trait WithPutRecordsSuccess { self: Settings => val publishedRecord = 
PutRecordsResultEntry.builder().build() - when(amazonKinesisAsync.putRecords(any[PutRecordsRequest])).thenAnswer(new Answer[AnyRef] { - override def answer(invocation: InvocationOnMock) = { - val request = invocation - .getArgument[PutRecordsRequest](0) - val result = PutRecordsResponse - .builder() - .failedRecordCount(0) - .records(request.records.asScala.map(_ => publishedRecord).asJava) - .build() - CompletableFuture.completedFuture(result) - } + when(amazonKinesisAsync.putRecords(any[PutRecordsRequest])).thenAnswer((invocation: InvocationOnMock) => { + val request = invocation + .getArgument[PutRecordsRequest](0) + val result = PutRecordsResponse + .builder() + .failedRecordCount(0) + .records(request.records.asScala.map(_ => publishedRecord).asJava) + .build() + CompletableFuture.completedFuture(result) }) } diff --git a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSourceSpec.scala b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSourceSpec.scala index db6b5f4f3..4ff5e5211 100644 --- a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSourceSpec.scala +++ b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSourceSpec.scala @@ -26,7 +26,6 @@ import pekko.util.ByteString import org.mockito.ArgumentMatchers._ import org.mockito.Mockito._ import org.mockito.invocation.InvocationOnMock -import org.mockito.stubbing.Answer import org.scalatest.wordspec.AnyWordSpec import org.scalatest.matchers.should.Matchers import software.amazon.awssdk.core.SdkBytes @@ -184,33 +183,25 @@ class KinesisSourceSpec extends AnyWordSpec with Matchers with KinesisMock with } trait WithGetShardIteratorSuccess { self: KinesisSpecContext => - when(amazonKinesisAsync.getShardIterator(any[GetShardIteratorRequest])).thenAnswer(new Answer[AnyRef] { - override def answer(invocation: InvocationOnMock): AnyRef = - CompletableFuture.completedFuture(getShardIteratorResult) - }) + when(amazonKinesisAsync.getShardIterator(any[GetShardIteratorRequest])).thenAnswer((_: InvocationOnMock) => + CompletableFuture.completedFuture(getShardIteratorResult)) } trait WithGetShardIteratorFailure { self: KinesisSpecContext => - when(amazonKinesisAsync.getShardIterator(any[GetShardIteratorRequest])).thenAnswer(new Answer[AnyRef] { - override def answer(invocation: InvocationOnMock): AnyRef = - CompletableFuture.completedFuture(getShardIteratorResult) - }) + when(amazonKinesisAsync.getShardIterator(any[GetShardIteratorRequest])).thenAnswer((_: InvocationOnMock) => + CompletableFuture.completedFuture(getShardIteratorResult)) } trait WithGetRecordsSuccess { self: KinesisSpecContext => - when(amazonKinesisAsync.getRecords(any[GetRecordsRequest])).thenAnswer(new Answer[AnyRef] { - override def answer(invocation: InvocationOnMock) = - CompletableFuture.completedFuture(getRecordsResult) - }) + when(amazonKinesisAsync.getRecords(any[GetRecordsRequest])).thenAnswer((_: InvocationOnMock) => + CompletableFuture.completedFuture(getRecordsResult)) } trait WithGetRecordsFailure { self: KinesisSpecContext => - when(amazonKinesisAsync.getRecords(any[GetRecordsRequest])).thenAnswer(new Answer[AnyRef] { - override def answer(invocation: InvocationOnMock) = { - val future = new CompletableFuture[GetRecordsResponse]() - future.completeExceptionally(new Exception("fail")) - future - } + when(amazonKinesisAsync.getRecords(any[GetRecordsRequest])).thenAnswer((_: InvocationOnMock) => { + val future = new CompletableFuture[GetRecordsResponse]() + 
future.completeExceptionally(new Exception("fail")) + future }) } } diff --git a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/Valve.scala b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/Valve.scala index c9d039ce9..128267bab 100644 --- a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/Valve.scala +++ b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/Valve.scala @@ -38,7 +38,7 @@ sealed trait ValveSwitch { * * @return A future that completes with [[SwitchMode]] to indicate the current state of the valve */ - def getMode(): Future[SwitchMode] + def getMode: Future[SwitchMode] } object Valve { @@ -105,11 +105,10 @@ final class Valve[A](mode: SwitchMode) extends GraphStageWithMaterializedValue[F true case Close => - if (isAvailable(in)) { + if (isAvailable(in)) push(out, grab(in)) - } else if (isAvailable(out) && !hasBeenPulled(in)) { + else if (isAvailable(out) && !hasBeenPulled(in)) pull(in) - } mode = SwitchMode.Open true @@ -127,7 +126,7 @@ final class Valve[A](mode: SwitchMode) extends GraphStageWithMaterializedValue[F promise.future } - override def getMode(): Future[SwitchMode] = { + override def getMode: Future[SwitchMode] = { val promise = Promise[SwitchMode]() getModeCallback.invoke(promise) promise.future @@ -137,18 +136,16 @@ final class Valve[A](mode: SwitchMode) extends GraphStageWithMaterializedValue[F setHandlers(in, out, this) override def onPush(): Unit = - if (isOpen) { + if (isOpen) push(out, grab(in)) - } override def onPull(): Unit = - if (isOpen) { + if (isOpen) pull(in) - } private def isOpen = mode == SwitchMode.Open - override def preStart() = + override def preStart(): Unit = promise.success(switch) } diff --git a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseFlowSpec.scala b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseFlowSpec.scala index 7f52428c1..8ff8ebae1 100644 --- a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseFlowSpec.scala +++ b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesisfirehose/KinesisFirehoseFlowSpec.scala @@ -27,7 +27,6 @@ import pekko.util.ByteString import org.mockito.ArgumentMatchers.any import org.mockito.Mockito.when import org.mockito.invocation.InvocationOnMock -import org.mockito.stubbing.Answer import org.scalatest.wordspec.AnyWordSpec import org.scalatest.matchers.should.Matchers import software.amazon.awssdk.core.SdkBytes @@ -88,8 +87,8 @@ class KinesisFirehoseFlowSpec extends AnyWordSpec with Matchers with KinesisFire } trait WithPutRecordsSuccess { self: KinesisFirehoseFlowProbe => - when(amazonKinesisFirehoseAsync.putRecordBatch(any[PutRecordBatchRequest])).thenAnswer(new Answer[AnyRef] { - override def answer(invocation: InvocationOnMock) = { + when(amazonKinesisFirehoseAsync.putRecordBatch(any[PutRecordBatchRequest])).thenAnswer( + (invocation: InvocationOnMock) => { val request = invocation .getArgument[PutRecordBatchRequest](0) val result = PutRecordBatchResponse @@ -98,17 +97,14 @@ class KinesisFirehoseFlowSpec extends AnyWordSpec with Matchers with KinesisFire .requestResponses(request.records.asScala.map(_ => publishedRecord).asJava) .build() CompletableFuture.completedFuture(result) - } - }) + }) } trait WithPutRecordsFailure { self: KinesisFirehoseFlowProbe => - when(amazonKinesisFirehoseAsync.putRecordBatch(any[PutRecordBatchRequest])).thenAnswer(new Answer[AnyRef] { - override def answer(invocation: 
InvocationOnMock) = { - val future = new CompletableFuture() - future.completeExceptionally(requestError) - future - } + when(amazonKinesisFirehoseAsync.putRecordBatch(any[PutRecordBatchRequest])).thenAnswer((_: InvocationOnMock) => { + val future = new CompletableFuture() + future.completeExceptionally(requestError) + future }) } diff --git a/kudu/src/main/mima-filters/1.1.x.backwards.excludes/KuduClientExt-more-specific-type.backwards.excludes b/kudu/src/main/mima-filters/1.1.x.backwards.excludes/KuduClientExt-more-specific-type.backwards.excludes new file mode 100644 index 000000000..bea10fc07 --- /dev/null +++ b/kudu/src/main/mima-filters/1.1.x.backwards.excludes/KuduClientExt-more-specific-type.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.kudu.KuduClientExt.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.kudu.KuduClientExt.lookup") \ No newline at end of file diff --git a/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/KuduClientExt.scala b/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/KuduClientExt.scala index b1abf6b39..dae041f99 100644 --- a/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/KuduClientExt.scala +++ b/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/KuduClientExt.scala @@ -21,7 +21,7 @@ import org.apache.kudu.client.KuduClient * Manages one [[org.apache.kudu.client.KuduClient]] per `ActorSystem`. */ final class KuduClientExt private (sys: ExtendedActorSystem) extends Extension { - val client = { + val client: KuduClient = { val masterAddress = sys.settings.config.getString("pekko.connectors.kudu.master-address") new KuduClient.KuduClientBuilder(masterAddress).build } @@ -30,7 +30,7 @@ final class KuduClientExt private (sys: ExtendedActorSystem) extends Extension { } object KuduClientExt extends ExtensionId[KuduClientExt] with ExtensionIdProvider { - override def lookup = KuduClientExt + override def lookup: KuduClientExt.type = KuduClientExt override def createExtension(system: ExtendedActorSystem) = new KuduClientExt(system) /** diff --git a/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/impl/KuduFlowStage.scala b/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/impl/KuduFlowStage.scala index 8f28fc9ab..f829afee4 100644 --- a/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/impl/KuduFlowStage.scala +++ b/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/impl/KuduFlowStage.scala @@ -21,7 +21,7 @@ import pekko.stream.stage._ import pekko.util.ccompat.JavaConverters._ import org.apache.kudu.Schema import org.apache.kudu.Type._ -import org.apache.kudu.client.{ KuduClient, KuduTable, PartialRow } +import org.apache.kudu.client.{ KuduClient, KuduSession, KuduTable, PartialRow } import scala.util.control.NonFatal @@ -38,9 +38,9 @@ private[kudu] class KuduFlowStage[A](settings: KuduTableSettings[A], kuduClient: private val in = Inlet[A]("messages") private val out = Outlet[A]("result") - override val shape = FlowShape(in, out) + override val shape: FlowShape[A, A] = FlowShape(in, out) - def copyToInsertRow(insertPartialRow: PartialRow, partialRow: PartialRow, schema: Schema): Unit = + private def copyToInsertRow(insertPartialRow: PartialRow, partialRow: PartialRow, schema: Schema): Unit = schema.getColumns.asScala.foreach { cSch => val columnName = cSch.getName val kuduType = cSch.getType @@ -54,19 +54,19 @@ private[kudu] class KuduFlowStage[A](settings: 
KuduTableSettings[A], kuduClient: case BOOL => insertPartialRow.addBoolean(columnName, partialRow.getBoolean(columnName)) case FLOAT => insertPartialRow.addFloat(columnName, partialRow.getFloat(columnName)) case DOUBLE => insertPartialRow.addDouble(columnName, partialRow.getDouble(columnName)) - case _ => throw new UnsupportedOperationException(s"Unknown type ${kuduType}") + case _ => throw new UnsupportedOperationException(s"Unknown type $kuduType") } } override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with StageLogging with KuduCapabilities with OutHandler with InHandler { - override protected def logSource = classOf[KuduFlowStage[A]] + override protected def logSource: Class[KuduFlowStage[A]] = classOf[KuduFlowStage[A]] lazy val table: KuduTable = getOrCreateTable(kuduClient, settings.tableName, settings.schema, settings.createTableOptions) - val session = kuduClient.newSession() + val session: KuduSession = kuduClient.newSession() setHandlers(in, out, this) @@ -75,7 +75,7 @@ private[kudu] class KuduFlowStage[A](settings: KuduTableSettings[A], kuduClient: override def onPush(): Unit = { val msg = grab(in) val insert = table.newUpsert() - val partialRow = insert.getRow() + val partialRow = insert.getRow copyToInsertRow(partialRow, settings.converter(msg), table.getSchema) session.apply(insert) push(out, msg) diff --git a/mongodb/src/main/scala/org/apache/pekko/stream/connectors/mongodb/DocumentUpdate.scala b/mongodb/src/main/scala/org/apache/pekko/stream/connectors/mongodb/DocumentUpdate.scala index e91bc44fa..a4f0a51aa 100644 --- a/mongodb/src/main/scala/org/apache/pekko/stream/connectors/mongodb/DocumentUpdate.scala +++ b/mongodb/src/main/scala/org/apache/pekko/stream/connectors/mongodb/DocumentUpdate.scala @@ -37,10 +37,10 @@ final class DocumentUpdate private (val filter: Bson, val update: Bson) { } object DocumentUpdate { - def apply(filter: Bson, update: Bson) = new DocumentUpdate(filter, update) + def apply(filter: Bson, update: Bson): DocumentUpdate = new DocumentUpdate(filter, update) /** * Java Api */ - def create(filter: Bson, update: Bson) = DocumentUpdate(filter, update) + def create(filter: Bson, update: Bson): DocumentUpdate = DocumentUpdate(filter, update) } diff --git a/mongodb/src/test/java/docs/javadsl/MongoSourceTest.java b/mongodb/src/test/java/docs/javadsl/MongoSourceTest.java index 42313cee9..30e9f096b 100644 --- a/mongodb/src/test/java/docs/javadsl/MongoSourceTest.java +++ b/mongodb/src/test/java/docs/javadsl/MongoSourceTest.java @@ -37,6 +37,7 @@ import java.util.stream.IntStream; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; public class MongoSourceTest { @Rule public final LogCapturingJunit4 logCapturing = new LogCapturingJunit4(); @@ -141,13 +142,11 @@ public void supportMultipleMaterializations() throws Exception { public void streamTheResultOfMongoQueryThatResultsInNoData() throws Exception { final Source source = MongoSource.create(numbersDocumentColl.find()); - assertEquals( - true, - source - .runWith(Sink.seq(), system) - .toCompletableFuture() - .get(5, TimeUnit.SECONDS) - .isEmpty()); + assertTrue(source + .runWith(Sink.seq(), system) + .toCompletableFuture() + .get(5, TimeUnit.SECONDS) + .isEmpty()); } private List seed() throws Exception { diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/BehaviorRunner.scala 
b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/BehaviorRunner.scala index 7d62945d1..f39ec37bd 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/BehaviorRunner.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/BehaviorRunner.scala @@ -35,19 +35,17 @@ object BehaviorRunner { case (b, StoredMessage(msg)) => val nextBehavior = Behavior.interpretMessage(b, context, msg) - if ((nextBehavior ne Behaviors.same) && (nextBehavior ne Behaviors.unhandled)) { + if ((nextBehavior ne Behaviors.same) && (nextBehavior ne Behaviors.unhandled)) nextBehavior - } else { + else b - } case (b, StoredSignal(signal)) => val nextBehavior = Behavior.interpretSignal(b, context, signal) - if ((nextBehavior ne Behaviors.same) && (nextBehavior ne Behaviors.unhandled)) { + if ((nextBehavior ne Behaviors.same) && (nextBehavior ne Behaviors.unhandled)) nextBehavior - } else { + else b - } } } diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/ClientState.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/ClientState.scala index 0372fdff0..09355f935 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/ClientState.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/ClientState.scala @@ -36,7 +36,7 @@ import scala.util.{ Either, Failure, Success } */ @InternalApi private[streaming] object ClientConnector { - type ConnectData = Option[_] + private type ConnectData = Option[_] /* * No ACK received - the CONNECT failed @@ -130,7 +130,7 @@ import scala.util.{ Either, Failure, Success } subscriberPacketRouter, unsubscriberPacketRouter, settings) - final case class ConnAckReceived( + private final case class ConnAckReceived( connectionId: ByteString, connectFlags: ConnectFlags, keepAlive: FiniteDuration, @@ -157,8 +157,6 @@ import scala.util.{ Either, Failure, Success } unsubscriberPacketRouter, settings) - final case class WaitingForQueueOfferResult(nextBehavior: Behavior[Event], stash: Seq[Event]) - sealed abstract class Event(val connectionId: ByteString) final case class ConnectReceivedLocally(override val connectionId: ByteString, @@ -171,9 +169,9 @@ import scala.util.{ Either, Failure, Success } local: Promise[ForwardConnAck]) extends Event(connectionId) - case class ReceiveConnAckTimeout(override val connectionId: ByteString) extends Event(connectionId) + final case class ReceiveConnAckTimeout(override val connectionId: ByteString) extends Event(connectionId) - case class ConnectionLost(override val connectionId: ByteString) extends Event(connectionId) + final case class ConnectionLost(override val connectionId: ByteString) extends Event(connectionId) final case class DisconnectReceivedLocally(override val connectionId: ByteString, remote: Promise[ForwardDisconnect.type]) @@ -197,7 +195,7 @@ import scala.util.{ Either, Failure, Success } final case class ProducerFree(topicName: String) extends Event(ByteString.empty) - case class SendPingReqTimeout(override val connectionId: ByteString) extends Event(connectionId) + private final case class SendPingReqTimeout(override val connectionId: ByteString) extends Event(connectionId) final case class PingRespReceivedFromRemote(override val connectionId: ByteString, local: Promise[ForwardPingResp.type]) @@ -323,7 +321,7 @@ import scala.util.{ Either, Failure, Success } 
data.stash.map(BehaviorRunner.StoredMessage.apply)) } - def serverConnect(data: ConnectReceived)(implicit mat: Materializer): Behavior[Event] = Behaviors.withTimers { + private def serverConnect(data: ConnectReceived)(implicit mat: Materializer): Behavior[Event] = Behaviors.withTimers { val ReceiveConnAck = "receive-connack" timer => @@ -390,7 +388,7 @@ import scala.util.{ Either, Failure, Success } } - def serverConnected(data: ConnAckReceived, + private def serverConnected(data: ConnAckReceived, resetPingReqTimer: Boolean = true)(implicit mat: Materializer): Behavior[Event] = Behaviors.withTimers { timer => val SendPingreq = "send-pingreq" @@ -474,11 +472,10 @@ import scala.util.{ Either, Failure, Success } pendingRemotePublications = data.pendingRemotePublications.take(i) ++ data.pendingRemotePublications.drop(i + 1)), resetPingReqTimer = true) - } else { + } else serverConnected(data.copy(activeConsumers = data.activeConsumers - topicName), resetPingReqTimer = true) - } - case (context, PublishReceivedLocally(publish, _)) + case (_, PublishReceivedLocally(publish, _)) if (publish.flags & ControlPacketFlags.QoSReserved).underlying == 0 => QueueOfferState.waitForQueueOfferCompleted( data.remote.offer(ForwardPublish(publish, None)), @@ -501,11 +498,10 @@ import scala.util.{ Either, Failure, Success } context.watch(producer) serverConnected(data.copy(activeProducers = data.activeProducers + (publish.topicName -> producer)), resetPingReqTimer = true) - } else { + } else serverConnected( data.copy(pendingLocalPublications = data.pendingLocalPublications :+ (publish.topicName -> prl)), resetPingReqTimer = true) - } case (context, ProducerFree(topicName)) => val i = data.pendingLocalPublications.indexWhere(_._1 == topicName) @@ -528,11 +524,10 @@ import scala.util.{ Either, Failure, Success } pendingLocalPublications = data.pendingLocalPublications.take(i) ++ data.pendingLocalPublications.drop(i + 1)), resetPingReqTimer = true) - } else { + } else serverConnected(data.copy(activeProducers = data.activeProducers - topicName), resetPingReqTimer = true) - } - case (context, ReceivedProducerPublishingCommand(Producer.ForwardPublish(publish, packetId))) => + case (_, ReceivedProducerPublishingCommand(Producer.ForwardPublish(publish, packetId))) => QueueOfferState.waitForQueueOfferCompleted( data.remote .offer(ForwardPublish(publish, packetId)), @@ -540,7 +535,7 @@ import scala.util.{ Either, Failure, Success } serverConnected(data, resetPingReqTimer = false), stash = Vector.empty) - case (context, ReceivedProducerPublishingCommand(Producer.ForwardPubRel(_, packetId))) => + case (_, ReceivedProducerPublishingCommand(Producer.ForwardPubRel(_, packetId))) => QueueOfferState.waitForQueueOfferCompleted( data.remote .offer(ForwardPubRel(packetId)), @@ -629,7 +624,7 @@ import scala.util.{ Either, Failure, Success } sealed abstract class Event final case class AcquiredPacketId(packetId: PacketId) extends Event - final case object UnobtainablePacketId extends Event + case object UnobtainablePacketId extends Event final case class SubAckReceivedFromRemote(local: Promise[ForwardSubAck]) extends Event case object ReceiveSubAckTimeout extends Event @@ -639,7 +634,7 @@ import scala.util.{ Either, Failure, Success } // State event handling - def prepareServerSubscribe(data: Start): Behavior[Event] = Behaviors.setup { context => + private def prepareServerSubscribe(data: Start): Behavior[Event] = Behaviors.setup { context => val reply = Promise[LocalPacketRouter.Registered]() data.packetRouter ! 
LocalPacketRouter.Register(context.self, reply) import context.executionContext @@ -712,7 +707,7 @@ import scala.util.{ Either, Failure, Success } sealed abstract class Event final case class AcquiredPacketId(packetId: PacketId) extends Event - final case object UnobtainablePacketId extends Event + case object UnobtainablePacketId extends Event final case class UnsubAckReceivedFromRemote(local: Promise[ForwardUnsubAck]) extends Event case object ReceiveUnsubAckTimeout extends Event @@ -722,7 +717,7 @@ import scala.util.{ Either, Failure, Success } // State event handling - def prepareServerUnsubscribe(data: Start): Behavior[Event] = Behaviors.setup { context => + private def prepareServerUnsubscribe(data: Start): Behavior[Event] = Behaviors.setup { context => val reply = Promise[LocalPacketRouter.Registered]() data.packetRouter ! LocalPacketRouter.Register(context.self, reply) import context.executionContext diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/MqttFrameStage.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/MqttFrameStage.scala index 8ae10093c..b7ed291ee 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/MqttFrameStage.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/MqttFrameStage.scala @@ -25,7 +25,7 @@ import scala.collection.immutable @InternalApi private[streaming] object MqttFrameStage { @tailrec - def frames( + private def frames( maxPacketSize: Int, bytesReceived: ByteString, bytesToEmit: Vector[ByteString]): Either[IllegalStateException, (immutable.Iterable[ByteString], ByteString)] = { @@ -41,12 +41,10 @@ import scala.collection.immutable if (bytesReceived.size >= packetSize) { val (b0, b1) = bytesReceived.splitAt(packetSize) frames(maxPacketSize, b1, bytesToEmit :+ b0) - } else { + } else Right((bytesToEmit, bytesReceived)) - } - } else { + } else Left(new IllegalStateException(s"Max packet size of $maxPacketSize exceeded with $packetSize")) - } case _: Left[BufferUnderflow.type, Int] @unchecked => Right((bytesToEmit, bytesReceived)) } diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/QueueOfferState.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/QueueOfferState.scala index 864da11c9..66c55f14a 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/QueueOfferState.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/QueueOfferState.scala @@ -44,9 +44,8 @@ private[mqtt] object QueueOfferState { val s = stash.map(BehaviorRunner.StoredMessage.apply) - if (result.isCompleted) { + if (result.isCompleted) // optimize for a common case where we were immediately able to enqueue - result.value.get match { case Success(QueueOfferResult.Enqueued) => BehaviorRunner.run(behavior, context, s) @@ -57,7 +56,7 @@ private[mqtt] object QueueOfferState { case Failure(failure) => throw failure } - } else { + else { result.onComplete { r => context.self.tell(f(r)) } diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala index 23b9ab79f..028ac92af 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala 
+++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala @@ -75,7 +75,7 @@ import scala.util.{ Either, Failure, Success } packetRouter: ActorRef[LocalPacketRouter.Request[Event]], override val settings: MqttSessionSettings) extends Data(publish, publishData, settings) - final case class Publishing(remote: SourceQueueWithComplete[ForwardPublishingCommand], + private final case class Publishing(remote: SourceQueueWithComplete[ForwardPublishingCommand], packetId: PacketId, override val publish: Publish, override val publishData: PublishData, @@ -85,11 +85,11 @@ import scala.util.{ Either, Failure, Success } sealed abstract class Event final case class AcquiredPacketId(packetId: PacketId) extends Event - final case object UnacquiredPacketId extends Event - case object ReceivePubAckRecTimeout extends Event + private case object UnacquiredPacketId extends Event + private case object ReceivePubAckRecTimeout extends Event final case class PubAckReceivedFromRemote(local: Promise[ForwardPubAck]) extends Event final case class PubRecReceivedFromRemote(local: Promise[ForwardPubRec]) extends Event - case object ReceivePubCompTimeout extends Event + private case object ReceivePubCompTimeout extends Event final case class PubCompReceivedFromRemote(local: Promise[ForwardPubComp]) extends Event case object ReceiveConnect extends Event @@ -107,7 +107,7 @@ import scala.util.{ Either, Failure, Success } // State event handling - def preparePublish(data: Start)(implicit mat: Materializer): Behavior[Event] = Behaviors.setup { context => + private def preparePublish(data: Start)(implicit mat: Materializer): Behavior[Event] = Behaviors.setup { context => def requestPacketId(): Unit = { val reply = Promise[LocalPacketRouter.Registered]() data.packetRouter ! 
LocalPacketRouter.Register(context.self.unsafeUpcast, reply) @@ -149,78 +149,80 @@ import scala.util.{ Either, Failure, Success } } } - def publishUnacknowledged(data: Publishing)(implicit mat: Materializer): Behavior[Event] = Behaviors.withTimers { - val ReceivePubackrec = "producer-receive-pubackrec" - timer => - if (data.settings.producerPubAckRecTimeout.toNanos > 0L) - timer.startSingleTimer(ReceivePubackrec, ReceivePubAckRecTimeout, data.settings.producerPubAckRecTimeout) - - Behaviors - .receive[Event] { - case (_, PubAckReceivedFromRemote(local)) - if data.publish.flags.contains(ControlPacketFlags.QoSAtLeastOnceDelivery) => - local.success(ForwardPubAck(data.publishData)) - Behaviors.stopped - - case (_, PubRecReceivedFromRemote(local)) - if data.publish.flags.contains(ControlPacketFlags.QoSAtMostOnceDelivery) => - local.success(ForwardPubRec(data.publishData)) - timer.cancel(ReceivePubackrec) - publishAcknowledged(data) - - case (context, ReceivePubAckRecTimeout | ReceiveConnect) => - QueueOfferState.waitForQueueOfferCompleted( - data.remote - .offer( - ForwardPublish(data.publish.copy(flags = data.publish.flags | ControlPacketFlags.DUP), - Some(data.packetId))), - result => QueueOfferCompleted(result.toEither), - publishUnacknowledged(data), - stash = Vector.empty) - - case _ => - Behaviors.same + private def publishUnacknowledged(data: Publishing)(implicit mat: Materializer): Behavior[Event] = + Behaviors.withTimers { + val ReceivePubackrec = "producer-receive-pubackrec" + timer => + if (data.settings.producerPubAckRecTimeout.toNanos > 0L) + timer.startSingleTimer(ReceivePubackrec, ReceivePubAckRecTimeout, data.settings.producerPubAckRecTimeout) + + Behaviors + .receive[Event] { + case (_, PubAckReceivedFromRemote(local)) + if data.publish.flags.contains(ControlPacketFlags.QoSAtLeastOnceDelivery) => + local.success(ForwardPubAck(data.publishData)) + Behaviors.stopped + + case (_, PubRecReceivedFromRemote(local)) + if data.publish.flags.contains(ControlPacketFlags.QoSAtMostOnceDelivery) => + local.success(ForwardPubRec(data.publishData)) + timer.cancel(ReceivePubackrec) + publishAcknowledged(data) + + case (_, ReceivePubAckRecTimeout | ReceiveConnect) => + QueueOfferState.waitForQueueOfferCompleted( + data.remote + .offer( + ForwardPublish(data.publish.copy(flags = data.publish.flags | ControlPacketFlags.DUP), + Some(data.packetId))), + result => QueueOfferCompleted(result.toEither), + publishUnacknowledged(data), + stash = Vector.empty) + + case _ => + Behaviors.same - } - .receiveSignal { - case (_, PostStop) => - data.remote.complete() - Behaviors.same - } - } + } + .receiveSignal { + case (_, PostStop) => + data.remote.complete() + Behaviors.same + } + } - def publishAcknowledged(data: Publishing)(implicit mat: Materializer): Behavior[Event] = Behaviors.withTimers { - val ReceivePubrel = "producer-receive-pubrel" - timer => - if (data.settings.producerPubCompTimeout.toNanos > 0L) - timer.startSingleTimer(ReceivePubrel, ReceivePubCompTimeout, data.settings.producerPubCompTimeout) - - Behaviors.setup { context => - QueueOfferState.waitForQueueOfferCompleted( - data.remote - .offer(ForwardPubRel(data.publish, data.packetId)), - result => QueueOfferCompleted(result.toEither), - Behaviors - .receiveMessagePartial[Event] { - case PubCompReceivedFromRemote(local) => - local.success(ForwardPubComp(data.publishData)) - Behaviors.stopped - case ReceivePubCompTimeout | ReceiveConnect => - QueueOfferState.waitForQueueOfferCompleted( - data.remote - .offer(ForwardPubRel(data.publish, 
data.packetId)), - result => QueueOfferCompleted(result.toEither), - publishAcknowledged(data), - stash = Vector.empty) - } - .receiveSignal { - case (_, PostStop) => - data.remote.complete() - Behaviors.same - }, - stash = Vector.empty) - } - } + private def publishAcknowledged(data: Publishing)(implicit mat: Materializer): Behavior[Event] = + Behaviors.withTimers { + val ReceivePubrel = "producer-receive-pubrel" + timer => + if (data.settings.producerPubCompTimeout.toNanos > 0L) + timer.startSingleTimer(ReceivePubrel, ReceivePubCompTimeout, data.settings.producerPubCompTimeout) + + Behaviors.setup { _ => + QueueOfferState.waitForQueueOfferCompleted( + data.remote + .offer(ForwardPubRel(data.publish, data.packetId)), + result => QueueOfferCompleted(result.toEither), + Behaviors + .receiveMessagePartial[Event] { + case PubCompReceivedFromRemote(local) => + local.success(ForwardPubComp(data.publishData)) + Behaviors.stopped + case ReceivePubCompTimeout | ReceiveConnect => + QueueOfferState.waitForQueueOfferCompleted( + data.remote + .offer(ForwardPubRel(data.publish, data.packetId)), + result => QueueOfferCompleted(result.toEither), + publishAcknowledged(data), + stash = Vector.empty) + } + .receiveSignal { + case (_, PostStop) => + data.remote.complete() + Behaviors.same + }, + stash = Vector.empty) + } + } } @@ -233,7 +235,7 @@ import scala.util.{ Either, Failure, Success } /* * No ACK received - the publication failed */ - case class ConsumeFailed(publish: Publish) extends Exception(publish.toString) with NoStackTrace + private final case class ConsumeFailed(publish: Publish) extends Exception(publish.toString) with NoStackTrace /* * Construct with the starting state @@ -260,7 +262,7 @@ import scala.util.{ Either, Failure, Success } override val packetRouter: ActorRef[RemotePacketRouter.Request[Event]], override val settings: MqttSessionSettings) extends Data(publish, clientId, packetId, packetRouter, settings) - final case class ClientConsuming(override val publish: Publish, + private final case class ClientConsuming(override val publish: Publish, override val clientId: Option[String], override val packetId: PacketId, override val packetRouter: ActorRef[RemotePacketRouter.Request[Event]], @@ -268,15 +270,15 @@ import scala.util.{ Either, Failure, Success } extends Data(publish, clientId, packetId, packetRouter, settings) sealed abstract class Event - final case object RegisteredPacketId extends Event - final case object UnobtainablePacketId extends Event + case object RegisteredPacketId extends Event + case object UnobtainablePacketId extends Event final case class PubAckReceivedLocally(remote: Promise[ForwardPubAck.type]) extends Event final case class PubRecReceivedLocally(remote: Promise[ForwardPubRec.type]) extends Event - case object ReceivePubAckRecTimeout extends Event + private case object ReceivePubAckRecTimeout extends Event final case class PubRelReceivedFromRemote(local: Promise[ForwardPubRel.type]) extends Event - case object ReceivePubRelTimeout extends Event + private case object ReceivePubRelTimeout extends Event final case class PubCompReceivedLocally(remote: Promise[ForwardPubComp.type]) extends Event - case object ReceivePubCompTimeout extends Event + private case object ReceivePubCompTimeout extends Event final case class DupPublishReceivedFromRemote(local: Promise[ForwardPublish.type]) extends Event sealed abstract class Command @@ -288,7 +290,7 @@ import scala.util.{ Either, Failure, Success } // State event handling - def prepareClientConsumption(data: Start): 
Behavior[Event] = Behaviors.setup { context => + private def prepareClientConsumption(data: Start): Behavior[Event] = Behaviors.setup { context => val reply = Promise[RemotePacketRouter.Registered.type]() data.packetRouter ! RemotePacketRouter.Register(context.self.unsafeUpcast, data.clientId, data.packetId, reply) import context.executionContext @@ -310,7 +312,7 @@ import scala.util.{ Either, Failure, Success } } - def consumeUnacknowledged(data: ClientConsuming): Behavior[Event] = Behaviors.withTimers { timer => + private def consumeUnacknowledged(data: ClientConsuming): Behavior[Event] = Behaviors.withTimers { timer => val ReceivePubackrel = "consumer-receive-pubackrel" timer.startSingleTimer(ReceivePubackrel, ReceivePubAckRecTimeout, data.settings.consumerPubAckRecTimeout) Behaviors @@ -330,7 +332,7 @@ import scala.util.{ Either, Failure, Success } } } - def consumeReceived(data: ClientConsuming): Behavior[Event] = Behaviors.withTimers { timer => + private def consumeReceived(data: ClientConsuming): Behavior[Event] = Behaviors.withTimers { timer => val ReceivePubrel = "consumer-receive-pubrel" timer.startSingleTimer(ReceivePubrel, ReceivePubRelTimeout, data.settings.consumerPubRelTimeout) Behaviors @@ -347,7 +349,7 @@ import scala.util.{ Either, Failure, Success } } } - def consumeAcknowledged(data: ClientConsuming): Behavior[Event] = Behaviors.withTimers { timer => + private def consumeAcknowledged(data: ClientConsuming): Behavior[Event] = Behaviors.withTimers { timer => val ReceivePubcomp = "consumer-receive-pubcomp" timer.startSingleTimer(ReceivePubcomp, ReceivePubCompTimeout, data.settings.consumerPubCompTimeout) Behaviors @@ -369,14 +371,15 @@ import scala.util.{ Either, Failure, Success } /* * Raised on routing if a packet id cannot determine an actor to route to */ - case class CannotRoute(packetId: PacketId) extends Exception("packet id: " + packetId.underlying) with NoStackTrace + final case class CannotRoute(packetId: PacketId) extends Exception("packet id: " + packetId.underlying) + with NoStackTrace /* * In case some brokers treat 0 as no packet id, we set our min to 1 * e.g. https://renesasrulz.com/synergy/synergy_tech_notes/f/technical-bulletin-board-notification-postings/8998/mqtt-client-packet-identifier-is-0-by-default-which-causes-azure-iot-hub-to-reset-connection */ - val MinPacketId = PacketId(1) - val MaxPacketId = PacketId(0xFFFF) + val MinPacketId: PacketId = PacketId(1) + val MaxPacketId: PacketId = PacketId(0xFFFF) // Requests @@ -402,11 +405,10 @@ import scala.util.{ Either, Failure, Success } def findNextPacketId[A](registrantsByPacketId: Map[PacketId, Registration[A]], after: PacketId): Option[PacketId] = { @annotation.tailrec def step(c: PacketId): Option[PacketId] = { - if (c.underlying == after.underlying) { + if (c.underlying == after.underlying) // this is a bug, given our guard for entry into `step` checks size. 
this // means an illegal packet was stored in the map throw new IllegalStateException("Cannot find a free packet id even though one is expected") - } if (c.underlying <= MaxPacketId.underlying && !registrantsByPacketId.contains(c)) Some(c) @@ -422,7 +424,7 @@ import scala.util.{ Either, Failure, Success } step(PacketId(after.underlying + 1)) } - private[streaming] case class Registration[A](registrant: ActorRef[A], failureReplies: Seq[Promise[_]]) + private[streaming] final case class Registration[A](registrant: ActorRef[A], failureReplies: Seq[Promise[_]]) } /* @@ -515,7 +517,8 @@ import scala.util.{ Either, Failure, Success } /* * Raised on routing if a packet id cannot determine an actor to route to */ - case class CannotRoute(packetId: PacketId) extends Exception("packet id: " + packetId.underlying) with NoStackTrace + final case class CannotRoute(packetId: PacketId) extends Exception("packet id: " + packetId.underlying) + with NoStackTrace // Requests @@ -539,7 +542,7 @@ import scala.util.{ Either, Failure, Success } // Replies sealed abstract class Reply - final case object Registered extends Reply + case object Registered extends Reply /* * Construct with the starting state @@ -547,7 +550,7 @@ import scala.util.{ Either, Failure, Success } def apply[A]: Behavior[Request[A]] = new RemotePacketRouter[A].main(Map.empty, Map.empty) - private[streaming] case class Registration[A](registrant: ActorRef[A], failureReplies: Seq[Promise[_]]) + private[streaming] final case class Registration[A](registrant: ActorRef[A], failureReplies: Seq[Promise[_]]) } /* @@ -639,31 +642,29 @@ object Topics { def filter(topicFilterName: String, topicName: String): Boolean = { @tailrec def matchStrings(tfn: String, tn: String): Boolean = - if (tfn == "/+" && tn == "/") { + if (tfn == "/+" && tn == "/") true - } else if (tfn.nonEmpty && tn.nonEmpty) { + else if (tfn.nonEmpty && tn.nonEmpty) { val tfnHead = tfn.charAt(0) val tnHead = tn.charAt(0) - if (tfnHead == '/' && tnHead != '/') { + if (tfnHead == '/' && tnHead != '/') false - } else if (tfnHead == '/' && tn.length == 1) { + else if (tfnHead == '/' && tn.length == 1) matchStrings(tfn, tn.tail) - } else if (tfnHead != '+' && tfnHead != '#' && tfnHead != tnHead) { + else if (tfnHead != '+' && tfnHead != '#' && tfnHead != tnHead) false - } else if (tfnHead == '+') { + else if (tfnHead == '+') matchStrings(tfn.tail, tn.tail.dropWhile(_ != '/')) - } else if (tfnHead == '#') { + else if (tfnHead == '#') matchStrings(tfn.tail, "") - } else { + else matchStrings(tfn.tail, tn.tail) - } - } else if (tfn.isEmpty && tn.isEmpty) { + } else if (tfn.isEmpty && tn.isEmpty) true - } else if (tfn == "/#" && tn.isEmpty) { + else if (tfn == "/#" && tn.isEmpty) true - } else { + else false - } matchStrings(topicFilterName, topicName) } } diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/ServerState.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/ServerState.scala index 6cd712e5e..dd339deb1 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/ServerState.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/ServerState.scala @@ -126,7 +126,7 @@ import scala.util.{ Failure, Success } Behaviors.same } - def listening(data: Data)(implicit mat: Materializer): Behavior[Event] = Behaviors.setup { context => + private def listening(data: Data)(implicit mat: Materializer): Behavior[Event] = Behaviors.setup 
{ context => def childTerminated(terminatedCc: ActorRef[ClientConnection.Event]): Behavior[Event] = data.clientConnections.find { case (_, (_, cc)) => cc == terminatedCc } match { case Some((connectionId, (clientId, _))) => @@ -288,7 +288,7 @@ import scala.util.{ Failure, Success } publisherPacketRouter, unpublisherPacketRouter, settings) - final case class ConnAckReplied( + private final case class ConnAckReplied( connect: Connect, remote: SourceQueueWithComplete[ForwardConnAckCommand], override val stash: Seq[Event], @@ -343,7 +343,7 @@ import scala.util.{ Failure, Success } extends Event final case class SubscribeReceivedFromRemote(subscribe: Subscribe, local: Promise[Publisher.ForwardSubscribe.type]) extends Event - final case class Subscribed(subscribe: Subscribe) extends Event + private final case class Subscribed(subscribe: Subscribe) extends Event final case class PublishReceivedFromRemote(publish: Publish, local: Promise[Consumer.ForwardPublish.type]) extends Event final case class ConsumerFree(topicName: String) extends Event @@ -352,15 +352,15 @@ import scala.util.{ Failure, Success } final case class UnsubscribeReceivedFromRemote(unsubscribe: Unsubscribe, local: Promise[Unpublisher.ForwardUnsubscribe.type]) extends Event - final case class Unsubscribed(unsubscribe: Unsubscribe) extends Event + private final case class Unsubscribed(unsubscribe: Unsubscribe) extends Event final case class PingReqReceivedFromRemote(local: Promise[ForwardPingReq.type]) extends Event final case class DisconnectReceivedFromRemote(local: Promise[ForwardDisconnect.type]) extends Event case object ConnectionLost extends Event - case object ReceivePingReqTimeout extends Event + private case object ReceivePingReqTimeout extends Event final case class ReceivedProducerPublishingCommand(command: Producer.ForwardPublishingCommand) extends Event final case class ConnectReceivedFromRemote(connect: Connect, local: Promise[ClientConnection.ForwardConnect.type]) extends Event - case object ReceiveConnectTimeout extends Event + private case object ReceiveConnectTimeout extends Event final case class QueueOfferCompleted(result: Either[Throwable, QueueOfferResult]) extends Event with QueueOfferState.QueueOfferCompleted @@ -380,7 +380,8 @@ import scala.util.{ Failure, Success } private val ConsumerNamePrefix = "consumer-" private val ProducerNamePrefix = "producer-" - def clientConnect(data: ConnectReceived)(implicit mat: Materializer): Behavior[Event] = Behaviors.setup { context => + private def clientConnect(data: ConnectReceived)( + implicit mat: Materializer): Behavior[Event] = Behaviors.setup { context => context.log.debug("clientConnect stash={}", data.stash) data.local.trySuccess(ForwardConnect) @@ -464,273 +465,202 @@ import scala.util.{ Failure, Success } data.stash.map(BehaviorRunner.StoredMessage.apply)) } - def clientConnected(data: ConnAckReplied)(implicit mat: Materializer): Behavior[Event] = Behaviors.withTimers { - timer => - val ReceivePingreq = "receive-pingreq" - if (data.connect.keepAlive.toMillis > 0) - timer.startSingleTimer(ReceivePingreq, - ReceivePingReqTimeout, - FiniteDuration((data.connect.keepAlive.toMillis * 1.5).toLong, TimeUnit.MILLISECONDS)) - - Behaviors - .receivePartial[Event] { - case (context, SubscribeReceivedFromRemote(subscribe, local)) => - val subscribed = Promise[Done]() - context.watch( - context.spawnAnonymous( - Publisher(data.connect.clientId, - subscribe.packetId, - local, - subscribed, - data.publisherPacketRouter, - data.settings))) - subscribed.future.foreach(_ => 
context.self ! Subscribed(subscribe))(context.executionContext) - clientConnected(data) - case (_, Subscribed(subscribe)) => - clientConnected( - data.copy( - publishers = data.publishers ++ subscribe.topicFilters.map(_._1))) - case (context, UnsubscribeReceivedFromRemote(unsubscribe, local)) => - val unsubscribed = Promise[Done]() - context.watch( - context.spawnAnonymous( - Unpublisher(data.connect.clientId, - unsubscribe.packetId, - local, - unsubscribed, - data.unpublisherPacketRouter, - data.settings))) - unsubscribed.future.foreach(_ => context.self ! Unsubscribed(unsubscribe))(context.executionContext) - clientConnected(data) - case (_, Unsubscribed(unsubscribe)) => - clientConnected(data.copy(publishers = data.publishers -- unsubscribe.topicFilters)) - case (_, PublishReceivedFromRemote(publish, local)) - if (publish.flags & ControlPacketFlags.QoSReserved).underlying == 0 => - local.success(Consumer.ForwardPublish) - clientConnected(data) - case (context, prfr @ PublishReceivedFromRemote(publish @ Publish(_, topicName, Some(packetId), _), local)) => - data.activeConsumers.get(topicName) match { - case None => + private def clientConnected(data: ConnAckReplied)(implicit mat: Materializer): Behavior[Event] = + Behaviors.withTimers { + timer => + val ReceivePingreq = "receive-pingreq" + if (data.connect.keepAlive.toMillis > 0) + timer.startSingleTimer(ReceivePingreq, + ReceivePingReqTimeout, + FiniteDuration((data.connect.keepAlive.toMillis * 1.5).toLong, TimeUnit.MILLISECONDS)) + + Behaviors + .receivePartial[Event] { + case (context, SubscribeReceivedFromRemote(subscribe, local)) => + val subscribed = Promise[Done]() + context.watch( + context.spawnAnonymous( + Publisher(data.connect.clientId, + subscribe.packetId, + local, + subscribed, + data.publisherPacketRouter, + data.settings))) + subscribed.future.foreach(_ => context.self ! Subscribed(subscribe))(context.executionContext) + clientConnected(data) + case (_, Subscribed(subscribe)) => + clientConnected( + data.copy( + publishers = data.publishers ++ subscribe.topicFilters.map(_._1))) + case (context, UnsubscribeReceivedFromRemote(unsubscribe, local)) => + val unsubscribed = Promise[Done]() + context.watch( + context.spawnAnonymous( + Unpublisher(data.connect.clientId, + unsubscribe.packetId, + local, + unsubscribed, + data.unpublisherPacketRouter, + data.settings))) + unsubscribed.future.foreach(_ => context.self ! Unsubscribed(unsubscribe))(context.executionContext) + clientConnected(data) + case (_, Unsubscribed(unsubscribe)) => + clientConnected(data.copy(publishers = data.publishers -- unsubscribe.topicFilters)) + case (_, PublishReceivedFromRemote(publish, local)) + if (publish.flags & ControlPacketFlags.QoSReserved).underlying == 0 => + local.success(Consumer.ForwardPublish) + clientConnected(data) + case (context, prfr @ PublishReceivedFromRemote(publish @ Publish(_, topicName, Some(packetId), _), + local)) => + data.activeConsumers.get(topicName) match { + case None => + val consumerName = ActorName.mkName(ConsumerNamePrefix + topicName + "-" + context.children.size) + val consumer = + context.spawn(Consumer(publish, + Some(data.connect.clientId), + packetId, + local, + data.consumerPacketRouter, + data.settings), + consumerName) + context.watch(consumer) + clientConnected(data.copy(activeConsumers = data.activeConsumers + (publish.topicName -> consumer))) + case Some(consumer) if publish.flags.contains(ControlPacketFlags.DUP) => + consumer ! 
Consumer.DupPublishReceivedFromRemote(local) + clientConnected(data) + case Some(_) => + clientConnected( + data.copy( + pendingRemotePublications = data.pendingRemotePublications :+ (publish.topicName -> prfr))) + } + case (context, ConsumerFree(topicName)) => + val i = data.pendingRemotePublications.indexWhere(_._1 == topicName) + if (i >= 0) { + val prfr = data.pendingRemotePublications(i)._2 val consumerName = ActorName.mkName(ConsumerNamePrefix + topicName + "-" + context.children.size) - val consumer = - context.spawn(Consumer(publish, + val consumer = context.spawn( + Consumer(prfr.publish, Some(data.connect.clientId), - packetId, - local, + prfr.publish.packetId.get, + prfr.local, data.consumerPacketRouter, data.settings), - consumerName) + consumerName) context.watch(consumer) - clientConnected(data.copy(activeConsumers = data.activeConsumers + (publish.topicName -> consumer))) - case Some(consumer) if publish.flags.contains(ControlPacketFlags.DUP) => - consumer ! Consumer.DupPublishReceivedFromRemote(local) - clientConnected(data) - case Some(_) => clientConnected( - data.copy(pendingRemotePublications = data.pendingRemotePublications :+ (publish.topicName -> prfr))) - } - case (context, ConsumerFree(topicName)) => - val i = data.pendingRemotePublications.indexWhere(_._1 == topicName) - if (i >= 0) { - val prfr = data.pendingRemotePublications(i)._2 - val consumerName = ActorName.mkName(ConsumerNamePrefix + topicName + "-" + context.children.size) - val consumer = context.spawn( - Consumer(prfr.publish, - Some(data.connect.clientId), - prfr.publish.packetId.get, - prfr.local, - data.consumerPacketRouter, - data.settings), - consumerName) - context.watch(consumer) - clientConnected( - data.copy( - activeConsumers = data.activeConsumers + (topicName -> consumer), - pendingRemotePublications = - data.pendingRemotePublications.take(i) ++ data.pendingRemotePublications.drop(i + 1))) - } else { - clientConnected(data.copy(activeConsumers = data.activeConsumers - topicName)) - } - case (context, PublishReceivedLocally(publish, _)) - if (publish.flags & ControlPacketFlags.QoSReserved).underlying == 0 && - data.publishers.exists(Topics.filter(_, publish.topicName)) => - QueueOfferState.waitForQueueOfferCompleted( - data.remote - .offer(ForwardPublish(publish, None)), - result => QueueOfferCompleted(result.toEither), - clientConnected(data), - stash = Vector.empty) - - case (context, prl @ PublishReceivedLocally(publish, publishData)) - if data.publishers.exists(Topics.filter(_, publish.topicName)) => - val producerName = ActorName.mkName(ProducerNamePrefix + publish.topicName + "-" + context.children.size) - if (!data.activeProducers.contains(publish.topicName)) { - val reply = Promise[Source[Producer.ForwardPublishingCommand, NotUsed]]() - import context.executionContext - reply.future.foreach { - _.runForeach(command => context.self ! 
ReceivedProducerPublishingCommand(command)) - } - val producer = - context.spawn(Producer(publish, publishData, reply, data.producerPacketRouter, data.settings), - producerName) - context.watch(producer) - clientConnected(data.copy(activeProducers = data.activeProducers + (publish.topicName -> producer))) - } else { - clientConnected( - data.copy(pendingLocalPublications = data.pendingLocalPublications :+ (publish.topicName -> prl))) - } - case (context, ProducerFree(topicName)) => - val i = data.pendingLocalPublications.indexWhere(_._1 == topicName) - if (i >= 0) { - val prl = data.pendingLocalPublications(i)._2 - val producerName = ActorName.mkName(ProducerNamePrefix + topicName + "-" + context.children.size) - val reply = Promise[Source[Producer.ForwardPublishingCommand, NotUsed]]() - import context.executionContext - reply.future.foreach { - _.runForeach(command => context.self ! ReceivedProducerPublishingCommand(command)) - } - val producer = context.spawn( - Producer(prl.publish, prl.publishData, reply, data.producerPacketRouter, data.settings), - producerName) - context.watch(producer) - clientConnected( - data.copy( - activeProducers = data.activeProducers + (topicName -> producer), - pendingLocalPublications = - data.pendingLocalPublications.take(i) ++ data.pendingLocalPublications.drop(i + 1))) - } else { - clientConnected(data.copy(activeProducers = data.activeProducers - topicName)) - } - case (context, ReceivedProducerPublishingCommand(command)) => - val eventualResult = command match { - case Producer.ForwardPublish(publish, packetId) => - data.remote - .offer(ForwardPublish(publish, packetId)) - case Producer.ForwardPubRel(_, packetId) => + data.copy( + activeConsumers = data.activeConsumers + (topicName -> consumer), + pendingRemotePublications = + data.pendingRemotePublications.take(i) ++ data.pendingRemotePublications.drop(i + 1))) + } else + clientConnected(data.copy(activeConsumers = data.activeConsumers - topicName)) + case (_, PublishReceivedLocally(publish, _)) + if (publish.flags & ControlPacketFlags.QoSReserved).underlying == 0 && + data.publishers.exists(Topics.filter(_, publish.topicName)) => + QueueOfferState.waitForQueueOfferCompleted( data.remote - .offer(ForwardPubRel(packetId)) - } - - QueueOfferState.waitForQueueOfferCompleted( - eventualResult, - result => QueueOfferCompleted(result.toEither), - clientConnected(data), - stash = Vector.empty) - case (context, PingReqReceivedFromRemote(local)) => - local.success(ForwardPingReq) - - QueueOfferState.waitForQueueOfferCompleted( - data.remote - .offer(ForwardPingResp), - result => QueueOfferCompleted(result.toEither), - clientConnected(data), - stash = Vector.empty) - - case (context, ReceivePingReqTimeout) => - data.remote.fail(ServerConnector.PingFailed) - timer.cancel(ReceivePingreq) - disconnect(context, data.remote, data) - case (context, DisconnectReceivedFromRemote(local)) => - local.success(ForwardDisconnect) - timer.cancel(ReceivePingreq) - disconnect(context, data.remote, data) - case (context, ClientConnection.ConnectionLost) => - timer.cancel(ReceivePingreq) - disconnect(context, data.remote, data) - case (context, ConnectReceivedFromRemote(connect, local)) - if connect.connectFlags.contains(ConnectFlags.CleanSession) => - context.children.foreach(context.stop) - timer.cancel(ReceivePingreq) - data.remote.complete() - clientConnect( - ConnectReceived( - connect, - local, - Vector.empty, - Set.empty, - Map.empty, - Map.empty, - Vector.empty, - Vector.empty, - data.consumerPacketRouter, - 
data.producerPacketRouter, - data.publisherPacketRouter, - data.unpublisherPacketRouter, - data.settings)) - case (_, ConnectReceivedFromRemote(connect, local)) => - timer.cancel(ReceivePingreq) - data.remote.complete() - clientConnect( - ConnectReceived( - connect, - local, - Vector.empty, - data.publishers, - data.activeConsumers, - data.activeProducers, - data.pendingLocalPublications, - data.pendingRemotePublications, - data.consumerPacketRouter, - data.producerPacketRouter, - data.publisherPacketRouter, - data.unpublisherPacketRouter, - data.settings)) - } - .receiveSignal { - case (context, ChildFailed(_, failure)) - if failure == Subscriber.SubscribeFailed || - failure == Unsubscriber.UnsubscribeFailed => - data.remote.fail(failure) - disconnect(context, data.remote, data) - case (context, t: Terminated) => - data.activeConsumers.find(_._2 == t.ref) match { - case Some((topic, _)) => - context.self ! ConsumerFree(topic) - case None => - data.activeProducers.find(_._2 == t.ref) match { - case Some((topic, _)) => - context.self ! ProducerFree(topic) - case None => + .offer(ForwardPublish(publish, None)), + result => QueueOfferCompleted(result.toEither), + clientConnected(data), + stash = Vector.empty) + + case (context, prl @ PublishReceivedLocally(publish, publishData)) + if data.publishers.exists(Topics.filter(_, publish.topicName)) => + val producerName = ActorName.mkName(ProducerNamePrefix + publish.topicName + "-" + context.children.size) + if (!data.activeProducers.contains(publish.topicName)) { + val reply = Promise[Source[Producer.ForwardPublishingCommand, NotUsed]]() + import context.executionContext + reply.future.foreach { + _.runForeach(command => context.self ! ReceivedProducerPublishingCommand(command)) } - } - Behaviors.same - case (_, PostStop) => - data.remote.complete() - Behaviors.same - } - } + val producer = + context.spawn(Producer(publish, publishData, reply, data.producerPacketRouter, data.settings), + producerName) + context.watch(producer) + clientConnected(data.copy(activeProducers = data.activeProducers + (publish.topicName -> producer))) + } else + clientConnected( + data.copy(pendingLocalPublications = data.pendingLocalPublications :+ (publish.topicName -> prl))) + case (context, ProducerFree(topicName)) => + val i = data.pendingLocalPublications.indexWhere(_._1 == topicName) + if (i >= 0) { + val prl = data.pendingLocalPublications(i)._2 + val producerName = ActorName.mkName(ProducerNamePrefix + topicName + "-" + context.children.size) + val reply = Promise[Source[Producer.ForwardPublishingCommand, NotUsed]]() + import context.executionContext + reply.future.foreach { + _.runForeach(command => context.self ! 
ReceivedProducerPublishingCommand(command)) + } + val producer = context.spawn( + Producer(prl.publish, prl.publishData, reply, data.producerPacketRouter, data.settings), + producerName) + context.watch(producer) + clientConnected( + data.copy( + activeProducers = data.activeProducers + (topicName -> producer), + pendingLocalPublications = + data.pendingLocalPublications.take(i) ++ data.pendingLocalPublications.drop(i + 1))) + } else + clientConnected(data.copy(activeProducers = data.activeProducers - topicName)) + case (_, ReceivedProducerPublishingCommand(command)) => + val eventualResult = command match { + case Producer.ForwardPublish(publish, packetId) => + data.remote + .offer(ForwardPublish(publish, packetId)) + case Producer.ForwardPubRel(_, packetId) => + data.remote + .offer(ForwardPubRel(packetId)) + } - def clientDisconnected(data: Disconnected)(implicit mat: Materializer): Behavior[Event] = Behaviors.withTimers { - timer => - val ReceiveConnect = "receive-connect" - if (!timer.isTimerActive(ReceiveConnect)) - timer.startSingleTimer(ReceiveConnect, ReceiveConnectTimeout, data.settings.receiveConnectTimeout) + QueueOfferState.waitForQueueOfferCompleted( + eventualResult, + result => QueueOfferCompleted(result.toEither), + clientConnected(data), + stash = Vector.empty) + case (_, PingReqReceivedFromRemote(local)) => + local.success(ForwardPingReq) - Behaviors - .receivePartial[Event] { - case (context, ConnectReceivedFromRemote(connect, local)) - if connect.connectFlags.contains(ConnectFlags.CleanSession) => - context.children.foreach(context.stop) - timer.cancel(ReceiveConnect) - clientConnect( - ConnectReceived( - connect, - local, - Vector.empty, - Set.empty, - Map.empty, - Map.empty, - Vector.empty, - Vector.empty, - data.consumerPacketRouter, - data.producerPacketRouter, - data.publisherPacketRouter, - data.unpublisherPacketRouter, - data.settings)) - case (context, ConnectReceivedFromRemote(connect, local)) => - timer.cancel(ReceiveConnect) - - BehaviorRunner.run( + QueueOfferState.waitForQueueOfferCompleted( + data.remote + .offer(ForwardPingResp), + result => QueueOfferCompleted(result.toEither), + clientConnected(data), + stash = Vector.empty) + + case (context, ReceivePingReqTimeout) => + data.remote.fail(ServerConnector.PingFailed) + timer.cancel(ReceivePingreq) + disconnect(context, data.remote, data) + case (context, DisconnectReceivedFromRemote(local)) => + local.success(ForwardDisconnect) + timer.cancel(ReceivePingreq) + disconnect(context, data.remote, data) + case (context, ClientConnection.ConnectionLost) => + timer.cancel(ReceivePingreq) + disconnect(context, data.remote, data) + case (context, ConnectReceivedFromRemote(connect, local)) + if connect.connectFlags.contains(ConnectFlags.CleanSession) => + context.children.foreach(context.stop) + timer.cancel(ReceivePingreq) + data.remote.complete() + clientConnect( + ConnectReceived( + connect, + local, + Vector.empty, + Set.empty, + Map.empty, + Map.empty, + Vector.empty, + Vector.empty, + data.consumerPacketRouter, + data.producerPacketRouter, + data.publisherPacketRouter, + data.unpublisherPacketRouter, + data.settings)) + case (_, ConnectReceivedFromRemote(connect, local)) => + timer.cancel(ReceivePingreq) + data.remote.complete() clientConnect( ConnectReceived( connect, @@ -745,25 +675,97 @@ import scala.util.{ Failure, Success } data.producerPacketRouter, data.publisherPacketRouter, data.unpublisherPacketRouter, - data.settings)), - context, - data.stash.map(BehaviorRunner.StoredMessage.apply)) + 
data.settings)) + } + .receiveSignal { + case (context, ChildFailed(_, failure)) + if failure == Subscriber.SubscribeFailed || + failure == Unsubscriber.UnsubscribeFailed => + data.remote.fail(failure) + disconnect(context, data.remote, data) + case (context, t: Terminated) => + data.activeConsumers.find(_._2 == t.ref) match { + case Some((topic, _)) => + context.self ! ConsumerFree(topic) + case None => + data.activeProducers.find(_._2 == t.ref) match { + case Some((topic, _)) => + context.self ! ProducerFree(topic) + case None => + } + } + Behaviors.same + case (_, PostStop) => + data.remote.complete() + Behaviors.same + } + } - case (_, ReceiveConnectTimeout) => - throw ClientConnectionFailed - case (_, ConnectionLost) => - Behaviors.same // We know... we are disconnected... - case (_, PublishReceivedLocally(publish, _)) - if !data.publishers.exists(Topics.filter(_, publish.topicName)) => - Behaviors.same - case (_, e) => - clientDisconnected(data.copy(stash = data.stash :+ e)) - } - .receiveSignal { - case (_, _: Terminated) => - Behaviors.same - } - } + private def clientDisconnected(data: Disconnected)(implicit mat: Materializer): Behavior[Event] = + Behaviors.withTimers { + timer => + val ReceiveConnect = "receive-connect" + if (!timer.isTimerActive(ReceiveConnect)) + timer.startSingleTimer(ReceiveConnect, ReceiveConnectTimeout, data.settings.receiveConnectTimeout) + + Behaviors + .receivePartial[Event] { + case (context, ConnectReceivedFromRemote(connect, local)) + if connect.connectFlags.contains(ConnectFlags.CleanSession) => + context.children.foreach(context.stop) + timer.cancel(ReceiveConnect) + clientConnect( + ConnectReceived( + connect, + local, + Vector.empty, + Set.empty, + Map.empty, + Map.empty, + Vector.empty, + Vector.empty, + data.consumerPacketRouter, + data.producerPacketRouter, + data.publisherPacketRouter, + data.unpublisherPacketRouter, + data.settings)) + case (context, ConnectReceivedFromRemote(connect, local)) => + timer.cancel(ReceiveConnect) + + BehaviorRunner.run( + clientConnect( + ConnectReceived( + connect, + local, + Vector.empty, + data.publishers, + data.activeConsumers, + data.activeProducers, + data.pendingLocalPublications, + data.pendingRemotePublications, + data.consumerPacketRouter, + data.producerPacketRouter, + data.publisherPacketRouter, + data.unpublisherPacketRouter, + data.settings)), + context, + data.stash.map(BehaviorRunner.StoredMessage.apply)) + + case (_, ReceiveConnectTimeout) => + throw ClientConnectionFailed + case (_, ConnectionLost) => + Behaviors.same // We know... we are disconnected... 
+ case (_, PublishReceivedLocally(publish, _)) + if !data.publishers.exists(Topics.filter(_, publish.topicName)) => + Behaviors.same + case (_, e) => + clientDisconnected(data.copy(stash = data.stash :+ e)) + } + .receiveSignal { + case (_, _: Terminated) => + Behaviors.same + } + } } /* @@ -811,8 +813,8 @@ import scala.util.{ Failure, Success } extends Data(clientId, packetId, subscribed, packetRouter, settings) sealed abstract class Event - final case object RegisteredPacketId extends Event - final case object UnobtainablePacketId extends Event + case object RegisteredPacketId extends Event + case object UnobtainablePacketId extends Event final case class SubAckReceivedLocally(remote: Promise[ForwardSubAck.type]) extends Event case object ReceiveSubAckTimeout extends Event @@ -822,7 +824,7 @@ import scala.util.{ Failure, Success } // State event handling - def preparePublisher(data: Start): Behavior[Event] = Behaviors.setup { context => + private def preparePublisher(data: Start): Behavior[Event] = Behaviors.setup { context => val reply = Promise[RemotePacketRouter.Registered.type]() data.packetRouter ! RemotePacketRouter.Register(context.self.unsafeUpcast, data.clientId, data.packetId, reply) import context.executionContext @@ -905,8 +907,8 @@ import scala.util.{ Failure, Success } extends Data(clientId, packetId, unsubscribed, packetRouter, settings) sealed abstract class Event - final case object RegisteredPacketId extends Event - final case object UnobtainablePacketId extends Event + case object RegisteredPacketId extends Event + case object UnobtainablePacketId extends Event final case class UnsubAckReceivedLocally(remote: Promise[ForwardUnsubAck.type]) extends Event case object ReceiveUnsubAckTimeout extends Event @@ -916,7 +918,7 @@ import scala.util.{ Failure, Success } // State event handling - def prepareServerUnpublisher(data: Start): Behavior[Event] = Behaviors.setup { context => + private def prepareServerUnpublisher(data: Start): Behavior[Event] = Behaviors.setup { context => val reply = Promise[RemotePacketRouter.Registered.type]() data.packetRouter ! 
RemotePacketRouter.Register(context.self.unsafeUpcast, data.clientId, data.packetId, reply) import context.executionContext diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala index f52addaed..1d7aa00a8 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala @@ -36,22 +36,22 @@ import scala.concurrent.{ ExecutionContext, Promise } * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html */ object ControlPacketType { - val Reserved1 = ControlPacketType(0) - val CONNECT = ControlPacketType(1) - val CONNACK = ControlPacketType(2) - val PUBLISH = ControlPacketType(3) - val PUBACK = ControlPacketType(4) - val PUBREC = ControlPacketType(5) - val PUBREL = ControlPacketType(6) - val PUBCOMP = ControlPacketType(7) - val SUBSCRIBE = ControlPacketType(8) - val SUBACK = ControlPacketType(9) - val UNSUBSCRIBE = ControlPacketType(10) - val UNSUBACK = ControlPacketType(11) - val PINGREQ = ControlPacketType(12) - val PINGRESP = ControlPacketType(13) - val DISCONNECT = ControlPacketType(14) - val Reserved2 = ControlPacketType(15) + val Reserved1: ControlPacketType = ControlPacketType(0) + val CONNECT: ControlPacketType = ControlPacketType(1) + val CONNACK: ControlPacketType = ControlPacketType(2) + val PUBLISH: ControlPacketType = ControlPacketType(3) + val PUBACK: ControlPacketType = ControlPacketType(4) + val PUBREC: ControlPacketType = ControlPacketType(5) + val PUBREL: ControlPacketType = ControlPacketType(6) + val PUBCOMP: ControlPacketType = ControlPacketType(7) + val SUBSCRIBE: ControlPacketType = ControlPacketType(8) + val SUBACK: ControlPacketType = ControlPacketType(9) + val UNSUBSCRIBE: ControlPacketType = ControlPacketType(10) + val UNSUBACK: ControlPacketType = ControlPacketType(11) + val PINGREQ: ControlPacketType = ControlPacketType(12) + val PINGRESP: ControlPacketType = ControlPacketType(13) + val DISCONNECT: ControlPacketType = ControlPacketType(14) + val Reserved2: ControlPacketType = ControlPacketType(15) } @InternalApi final case class ControlPacketType(underlying: Int) extends AnyVal @@ -61,19 +61,19 @@ final case class ControlPacketType(underlying: Int) extends AnyVal * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html */ object ControlPacketFlags { - val None = ControlPacketFlags(0) - val ReservedGeneral = ControlPacketFlags(0) - val ReservedPubRel = ControlPacketFlags(1 << 1) - val ReservedSubscribe = ControlPacketFlags(1 << 1) - val ReservedUnsubscribe = ControlPacketFlags(1 << 1) - val ReservedUnsubAck = ControlPacketFlags(1 << 1) - val DUP = ControlPacketFlags(1 << 3) - val QoSAtMostOnceDelivery = ControlPacketFlags(0) - val QoSAtLeastOnceDelivery = ControlPacketFlags(1 << 1) - val QoSExactlyOnceDelivery = ControlPacketFlags(2 << 1) - val QoSReserved = ControlPacketFlags(3 << 1) - val QoSFailure = ControlPacketFlags(1 << 7) - val RETAIN = ControlPacketFlags(1) + val None: ControlPacketFlags = ControlPacketFlags(0) + val ReservedGeneral: ControlPacketFlags = ControlPacketFlags(0) + val ReservedPubRel: ControlPacketFlags = ControlPacketFlags(1 << 1) + val ReservedSubscribe: ControlPacketFlags = ControlPacketFlags(1 << 1) + val ReservedUnsubscribe: ControlPacketFlags = ControlPacketFlags(1 << 1) + val ReservedUnsubAck: ControlPacketFlags = ControlPacketFlags(1 << 
1) + val DUP: ControlPacketFlags = ControlPacketFlags(1 << 3) + val QoSAtMostOnceDelivery: ControlPacketFlags = ControlPacketFlags(0) + val QoSAtLeastOnceDelivery: ControlPacketFlags = ControlPacketFlags(1 << 1) + val QoSExactlyOnceDelivery: ControlPacketFlags = ControlPacketFlags(2 << 1) + val QoSReserved: ControlPacketFlags = ControlPacketFlags(3 << 1) + val QoSFailure: ControlPacketFlags = ControlPacketFlags(1 << 7) + val RETAIN: ControlPacketFlags = ControlPacketFlags(1) } @InternalApi @@ -116,14 +116,14 @@ case object Reserved2 extends ControlPacket(ControlPacketType.Reserved2, Control final case class PacketId(underlying: Int) extends AnyVal object ConnectFlags { - val None = ConnectFlags(0) - val Reserved = ConnectFlags(1) - val CleanSession = ConnectFlags(1 << 1) - val WillFlag = ConnectFlags(1 << 2) - val WillQoS = ConnectFlags(3 << 3) - val WillRetain = ConnectFlags(1 << 5) - val PasswordFlag = ConnectFlags(1 << 6) - val UsernameFlag = ConnectFlags(1 << 7) + val None: ConnectFlags = ConnectFlags(0) + val Reserved: ConnectFlags = ConnectFlags(1) + val CleanSession: ConnectFlags = ConnectFlags(1 << 1) + val WillFlag: ConnectFlags = ConnectFlags(1 << 2) + val WillQoS: ConnectFlags = ConnectFlags(3 << 3) + val WillRetain: ConnectFlags = ConnectFlags(1 << 5) + val PasswordFlag: ConnectFlags = ConnectFlags(1 << 6) + val UsernameFlag: ConnectFlags = ConnectFlags(1 << 7) } /** @@ -218,8 +218,8 @@ final case class Connect(protocolName: Connect.ProtocolName, } object ConnAckFlags { - val None = ConnAckFlags(0) - val SessionPresent = ConnAckFlags(1) + val None: ConnAckFlags = ConnAckFlags(0) + val SessionPresent: ConnAckFlags = ConnAckFlags(1) } /** @@ -229,12 +229,12 @@ object ConnAckFlags { final case class ConnAckFlags private[streaming] (underlying: Int) extends AnyVal object ConnAckReturnCode { - val ConnectionAccepted = ConnAckReturnCode(0) - val ConnectionRefusedUnacceptableProtocolVersion = ConnAckReturnCode(1) - val ConnectionRefusedIdentifierRejected = ConnAckReturnCode(2) - val ConnectionRefusedServerUnavailable = ConnAckReturnCode(3) - val ConnectionRefusedBadUsernameOrPassword = ConnAckReturnCode(4) - val ConnectionRefusedNotAuthorized = ConnAckReturnCode(5) + val ConnectionAccepted: ConnAckReturnCode = ConnAckReturnCode(0) + val ConnectionRefusedUnacceptableProtocolVersion: ConnAckReturnCode = ConnAckReturnCode(1) + val ConnectionRefusedIdentifierRejected: ConnAckReturnCode = ConnAckReturnCode(2) + val ConnectionRefusedServerUnavailable: ConnAckReturnCode = ConnAckReturnCode(3) + val ConnectionRefusedBadUsernameOrPassword: ConnAckReturnCode = ConnAckReturnCode(4) + val ConnectionRefusedNotAuthorized: ConnAckReturnCode = ConnAckReturnCode(5) } /** @@ -493,7 +493,7 @@ object MqttCodec { /** * Not enough bytes in the byte iterator */ - final case object BufferUnderflow extends DecodeError + case object BufferUnderflow extends DecodeError /** * Cannot determine the type/flags combination of the control packet @@ -516,7 +516,7 @@ object MqttCodec { /** * Bit 0 of the connect flag was set - which it should not be as it is reserved. 
*/ - final case object ConnectFlagReservedSet extends DecodeError + case object ConnectFlagReservedSet extends DecodeError /** * Something is wrong with the connect message @@ -898,9 +898,8 @@ object MqttCodec { if ((connectAckFlags & 0xFE) == 0) { val resultCode = v.getByte & 0xFF Right(ConnAck(ConnAckFlags(connectAckFlags), ConnAckReturnCode(resultCode))) - } else { + } else Left(ConnectAckFlagReservedBitsSet) - } } catch { case _: NoSuchElementException => Left(BufferUnderflow) } @@ -980,23 +979,21 @@ object MqttCodec { val packetLenAtTopicFilter = v.len val topicFilter = (v.decodeString(), ControlPacketFlags(v.getByte & 0xFF)) decodeTopicFilters(remainingLen - (packetLenAtTopicFilter - v.len), topicFilters :+ topicFilter) - } else { + } else topicFilters - } val topicFilters = decodeTopicFilters(l - (packetLen - v.len), Vector.empty) val topicFiltersValid = topicFilters.nonEmpty && topicFilters.foldLeft(true) { case (true, (Right(_), tff)) if tff.underlying < ControlPacketFlags.QoSReserved.underlying => true case _ => false } - if (topicFiltersValid) { + if (topicFiltersValid) Right(Subscribe(packetId, topicFilters.flatMap { case (Right(tfs), tff) => List(tfs -> tff) case _ => List.empty })) - } else { + else Left(BadSubscribeMessage(packetId, topicFilters)) - } } catch { case _: NoSuchElementException => Left(BufferUnderflow) } @@ -1012,9 +1009,8 @@ object MqttCodec { val packetLenAtTopicFilter = v.len val returnCode = ControlPacketFlags(v.getByte & 0xFF) decodeReturnCodes(remainingLen - (packetLenAtTopicFilter - v.len), returnCodes :+ returnCode) - } else { + } else returnCodes - } val returnCodes = decodeReturnCodes(l - (packetLen - v.len), Vector.empty) Right(SubAck(packetId, returnCodes)) } catch { @@ -1034,9 +1030,8 @@ object MqttCodec { val packetLenAtTopicFilter = v.len val topicFilter = v.decodeString() decodeTopicFilters(remainingLen - (packetLenAtTopicFilter - v.len), topicFilters :+ topicFilter) - } else { + } else topicFilters - } val topicFilters = decodeTopicFilters(l - (packetLen - v.len), Vector.empty) val topicFiltersValid = topicFilters.nonEmpty && topicFilters.foldLeft(true) { case (true, Right(_)) => true diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/Mqtt.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/Mqtt.scala index bb216d2c8..1c6521c63 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/Mqtt.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/Mqtt.scala @@ -68,10 +68,10 @@ object Mqtt { /** INTERNAL API - taken from Pekko streams - perhaps it should be made public */ private[scaladsl] class CoupledTerminationBidi[I, O] extends GraphStage[BidiShape[I, I, O, O]] { - val in1: Inlet[I] = Inlet("CoupledCompletion.in1") - val out1: Outlet[I] = Outlet("CoupledCompletion.out1") - val in2: Inlet[O] = Inlet("CoupledCompletion.in2") - val out2: Outlet[O] = Outlet("CoupledCompletion.out2") + private val in1: Inlet[I] = Inlet("CoupledCompletion.in1") + private val out1: Outlet[I] = Outlet("CoupledCompletion.out1") + private val in2: Inlet[O] = Inlet("CoupledCompletion.in2") + private val out2: Outlet[O] = Outlet("CoupledCompletion.out2") override val shape: BidiShape[I, I, O, O] = BidiShape(in1, out1, in2, out2) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { diff --git 
a/mqtt-streaming/src/test/scala/docs/scaladsl/MqttSessionSpec.scala b/mqtt-streaming/src/test/scala/docs/scaladsl/MqttSessionSpec.scala index 385ab54e3..28d57f8e5 100644 --- a/mqtt-streaming/src/test/scala/docs/scaladsl/MqttSessionSpec.scala +++ b/mqtt-streaming/src/test/scala/docs/scaladsl/MqttSessionSpec.scala @@ -1796,11 +1796,9 @@ class MqttSessionSpec if (explicitDisconnect) { fromClientQueue1.offer(disconnectBytes) - disconnectReceived.future.futureValue shouldBe Done - } else { + } else serverConnection1.complete() - } val (fromClientQueue2, serverConnection2) = server(ByteString(1)) diff --git a/mqtt-streaming/src/test/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/QueueOfferStateSpec.scala b/mqtt-streaming/src/test/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/QueueOfferStateSpec.scala index e66c82ce4..2b505c3e0 100644 --- a/mqtt-streaming/src/test/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/QueueOfferStateSpec.scala +++ b/mqtt-streaming/src/test/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/QueueOfferStateSpec.scala @@ -45,7 +45,7 @@ class QueueOfferStateSpec private implicit val ec: ExecutionContext = system.dispatcher private val baseBehavior = Behaviors.receivePartial[Msg] { - case (context, DoubleIt(n, reply)) => + case (_, DoubleIt(n, reply)) => reply.tell(n * 2) Behaviors.same diff --git a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/impl/MqttFlowStage.scala b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/impl/MqttFlowStage.scala index 9abd66c94..2d7b77e15 100644 --- a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/impl/MqttFlowStage.scala +++ b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/impl/MqttFlowStage.scala @@ -104,9 +104,9 @@ abstract class MqttFlowStageLogic[I](in: Inlet[I], protected def handleDeliveryComplete(token: IMqttDeliveryToken): Unit = () private val onSubscribe: AsyncCallback[Try[IMqttToken]] = getAsyncCallback[Try[IMqttToken]] { conn => - if (subscriptionPromise.isCompleted) { + if (subscriptionPromise.isCompleted) log.debug("subscription re-established") - } else { + else { subscriptionPromise.complete(conn.map(_ => { log.debug("subscription established") Done @@ -132,13 +132,12 @@ abstract class MqttFlowStageLogic[I](in: Inlet[I], private val onMessageAsyncCallback: AsyncCallback[MqttMessageWithAck] = getAsyncCallback[MqttMessageWithAck] { message => - if (isAvailable(out)) { + if (isAvailable(out)) pushDownstream(message) - } else if (queue.size + 1 > bufferSize) { + else if (queue.size + 1 > bufferSize) failStageWith(new RuntimeException(s"Reached maximum buffer size $bufferSize")) - } else { + else queue.enqueue(message) - } } private val onPublished: AsyncCallback[Try[IMqttToken]] = getAsyncCallback[Try[IMqttToken]] { @@ -187,7 +186,7 @@ abstract class MqttFlowStageLogic[I](in: Inlet[I], override def messageArrived(topic: String, pahoMessage: PahoMqttMessage): Unit = { backpressurePahoClient.acquire() val message = new MqttMessageWithAck { - override val message = MqttMessage(topic, ByteString.fromArrayUnsafe(pahoMessage.getPayload)) + override val message: MqttMessage = MqttMessage(topic, ByteString.fromArrayUnsafe(pahoMessage.getPayload)) override def ack(): Future[Done] = { val promise = Promise[Done]() @@ -209,9 +208,8 @@ abstract class MqttFlowStageLogic[I](in: Inlet[I], if (!connectionSettings.automaticReconnect) { log.info("connection lost (you might want to enable `automaticReconnect` in `MqttConnectionSettings`)") 
onConnectionLost.invoke(cause) - } else { + } else log.info("connection lost, trying to reconnect") - } override def connectComplete(reconnect: Boolean, serverURI: String): Unit = { pendingMsg.foreach { msg => @@ -332,7 +330,7 @@ private[mqtt] object MqttFlowStageLogic { final private case class CommitCallbackArguments(messageId: Int, qos: MqttQoS, promise: Promise[Done]) - def asConnectOptions(connectionSettings: MqttConnectionSettings): MqttConnectOptions = { + private def asConnectOptions(connectionSettings: MqttConnectionSettings): MqttConnectOptions = { val options = new MqttConnectOptions connectionSettings.auth.foreach { case (user, password) => @@ -353,9 +351,8 @@ private[mqtt] object MqttFlowStageLogic { options.setConnectionTimeout(connectionSettings.connectionTimeout.toSeconds.toInt) options.setMaxInflight(connectionSettings.maxInFlight) options.setMqttVersion(connectionSettings.mqttVersion) - if (connectionSettings.serverUris.nonEmpty) { + if (connectionSettings.serverUris.nonEmpty) options.setServerURIs(connectionSettings.serverUris.toArray) - } connectionSettings.sslHostnameVerifier.foreach(options.setSSLHostnameVerifier) if (connectionSettings.sslProperties.nonEmpty) { val properties = new Properties() @@ -365,7 +362,7 @@ private[mqtt] object MqttFlowStageLogic { options } - def asActionListener(func: Try[IMqttToken] => Unit): IMqttActionListener = new IMqttActionListener { + private def asActionListener(func: Try[IMqttToken] => Unit): IMqttActionListener = new IMqttActionListener { def onSuccess(token: IMqttToken): Unit = func(Success(token)) def onFailure(token: IMqttToken, ex: Throwable): Unit = func(Failure(ex)) diff --git a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/settings.scala b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/settings.scala index 6bc97dde3..735636ac0 100644 --- a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/settings.scala +++ b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/settings.scala @@ -287,7 +287,7 @@ final class MqttConnectionSettings private (val broker: String, sslProperties = sslProperties, offlinePersistenceSettings = offlinePersistenceSettings) - override def toString = + override def toString: String = "MqttConnectionSettings(" + s"broker=$broker," + s"clientId=$clientId," + diff --git a/mqtt/src/test/scala/docs/scaladsl/MqttSpecBase.scala b/mqtt/src/test/scala/docs/scaladsl/MqttSpecBase.scala index 3f1e14ac3..f9382b83f 100644 --- a/mqtt/src/test/scala/docs/scaladsl/MqttSpecBase.scala +++ b/mqtt/src/test/scala/docs/scaladsl/MqttSpecBase.scala @@ -42,6 +42,6 @@ abstract class MqttSpecBase(name: String) val timeout = 5.seconds - override def afterAll() = TestKit.shutdownActorSystem(system) + override def afterAll(): Unit = TestKit.shutdownActorSystem(system) } diff --git a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/OrientDbSourceSettings.scala b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/OrientDbSourceSettings.scala index 4b0ef46d6..793293ca8 100644 --- a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/OrientDbSourceSettings.scala +++ b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/OrientDbSourceSettings.scala @@ -34,7 +34,7 @@ final class OrientDbSourceSettings private ( skip = skip, limit = limit) - override def toString = + override def toString: String = "OrientDBSourceSettings(" + s"oDatabasePool=$oDatabasePool," + s"skip=$skip," + diff --git 
a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/OrientDbWriteSettings.scala b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/OrientDbWriteSettings.scala index 49a9c70d6..581cc29b7 100644 --- a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/OrientDbWriteSettings.scala +++ b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/OrientDbWriteSettings.scala @@ -27,7 +27,7 @@ final class OrientDbWriteSettings private ( new OrientDbWriteSettings( oDatabasePool = oDatabasePool) - override def toString = + override def toString: String = "OrientDBUpdateSettings(" + s"oDatabasePool=$oDatabasePool" + ")" diff --git a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/impl/OrientDbFlowStage.scala b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/impl/OrientDbFlowStage.scala index c9fdbfa70..023dfa5c8 100644 --- a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/impl/OrientDbFlowStage.scala +++ b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/impl/OrientDbFlowStage.scala @@ -40,7 +40,8 @@ private[orientdb] class OrientDbFlowStage[T, C]( private val in = Inlet[immutable.Seq[OrientDbWriteMessage[T, C]]]("in") private val out = Outlet[immutable.Seq[OrientDbWriteMessage[T, C]]]("out") - override val shape = FlowShape(in, out) + override val shape: FlowShape[immutable.Seq[OrientDbWriteMessage[T, C]], immutable.Seq[OrientDbWriteMessage[T, C]]] = + FlowShape(in, out) override def initialAttributes: Attributes = // see https://orientdb.com/docs/last/Java-Multi-Threading.html super.initialAttributes.and(ActorAttributes.Dispatcher("pekko.connectors.orientdb.pinned-dispatcher")) @@ -92,13 +93,12 @@ private[orientdb] class OrientDbFlowStage[T, C]( } - final class ORecordLogic(className: String) extends OrientDbLogic { + private final class ORecordLogic(className: String) extends OrientDbLogic { override def preStart(): Unit = { super.preStart() - if (!client.getMetadata.getSchema.existsClass(className)) { + if (!client.getMetadata.getSchema.existsClass(className)) client.getMetadata.getSchema.createClass(className) - } } protected def write(messages: immutable.Seq[OrientDbWriteMessage[T, C]]): Unit = @@ -121,7 +121,7 @@ private[orientdb] class OrientDbFlowStage[T, C]( } } - final class OrientDbTypedLogic(clazz: Class[T]) extends OrientDbLogic() { + private final class OrientDbTypedLogic(clazz: Class[T]) extends OrientDbLogic() { override def preStart(): Unit = { super.preStart() diff --git a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/impl/OrientDbSourceStage.scala b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/impl/OrientDbSourceStage.scala index 0523bc833..7a2c2f935 100644 --- a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/impl/OrientDbSourceStage.scala +++ b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/impl/OrientDbSourceStage.scala @@ -55,7 +55,7 @@ private[orientdb] final class OrientDbSourceStage[T](className: String, new Logic { override protected def runQuery(): util.List[T] = client.query[util.List[T]]( - new OSQLSynchQuery[T](s"SELECT * FROM $className SKIP ${skip} LIMIT ${settings.limit}")) + new OSQLSynchQuery[T](s"SELECT * FROM $className SKIP $skip LIMIT ${settings.limit}")) } } @@ -85,7 +85,7 @@ private[orientdb] final class OrientDbSourceStage[T](className: String, oObjectClient .query[util.List[T]]( new OSQLSynchQuery[T]( - s"SELECT * FROM $className 
SKIP ${skip} LIMIT ${settings.limit}")) + s"SELECT * FROM $className SKIP $skip LIMIT ${settings.limit}")) } } diff --git a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/model.scala b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/model.scala index c23d49557..d1976a0bc 100644 --- a/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/model.scala +++ b/orientdb/src/main/scala/org/apache/pekko/stream/connectors/orientdb/model.scala @@ -25,7 +25,7 @@ object OrientDbWriteMessage { OrientDbWriteMessage(oDocument, NotUsed) // Java-api - with passThrough - def create[T, C](oDocument: T, passThrough: C) = + def create[T, C](oDocument: T, passThrough: C): OrientDbWriteMessage[T, C] = OrientDbWriteMessage(oDocument, passThrough) } diff --git a/orientdb/src/test/java/docs/javadsl/OrientDbTest.java b/orientdb/src/test/java/docs/javadsl/OrientDbTest.java index d602d85db..7d62f643a 100644 --- a/orientdb/src/test/java/docs/javadsl/OrientDbTest.java +++ b/orientdb/src/test/java/docs/javadsl/OrientDbTest.java @@ -333,12 +333,7 @@ public void typedStreamWithPassThrough() throws Exception { new messagesFromKafka("Effective Akka", new KafkaOffset(2))); Consumer commitToKafka = - new Consumer() { - @Override - public void accept(KafkaOffset kafkaOffset) { - committedOffsets.add(kafkaOffset.getOffset()); - } - }; + kafkaOffset -> committedOffsets.add(kafkaOffset.getOffset()); Source.from(messagesFromKafkas) .map( @@ -373,7 +368,7 @@ public void accept(KafkaOffset kafkaOffset) { assertEquals( messagesFromKafkas.stream() - .map(m -> m.getBook_title()) + .map(messagesFromKafka::getBook_title) .sorted() .collect(Collectors.toList()), result2.stream().sorted().collect(Collectors.toList())); diff --git a/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala b/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala index f4dff41f9..12a0cba38 100644 --- a/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala +++ b/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala @@ -76,9 +76,8 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with override def beforeAll() = { oServerAdmin = new OServerAdmin(url).connect(username, password) - if (!oServerAdmin.existsDatabase(dbName, "plocal")) { + if (!oServerAdmin.existsDatabase(dbName, "plocal")) oServerAdmin.createDatabase(dbName, "document", "plocal") - } // #init-settings @@ -101,15 +100,14 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with flush(sourceClass, "book_title", "Akka Concurrency") } - override def afterAll() = { + override def afterAll(): Unit = { unregister(sourceClass) unregister(sink4) unregister(sink5) unregister(sink7) - if (oServerAdmin.existsDatabase(dbName, "plocal")) { + if (oServerAdmin.existsDatabase(dbName, "plocal")) oServerAdmin.dropDatabase(dbName, "plocal") - } oServerAdmin.close() client.close() @@ -253,8 +251,8 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with // After we've written them to oRIENTdb, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage(book: Book, offset: KafkaOffset) + final case class KafkaOffset(offset: Int) + final case class KafkaMessage(book: Book, offset: KafkaOffset) val messagesFromKafka = List( KafkaMessage(Book("Book 1"), KafkaOffset(0)), diff --git a/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/PravegaTable.java 
b/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/PravegaTable.java index 19e02103d..43b08a034 100644 --- a/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/PravegaTable.java +++ b/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/PravegaTable.java @@ -29,15 +29,9 @@ import java.util.Optional; import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import java.nio.ByteBuffer; - -import io.pravega.client.tables.TableKey; import org.apache.pekko.util.OptionConverters; -import scala.Option; - @ApiMayChange public class PravegaTable { @@ -73,6 +67,6 @@ public static Source, CompletionStage> source( public static Flow, NotUsed> readFlow( String scope, String tableName, TableSettings tableSettings) { return Flow.fromGraph(new PravegaTableReadFlow(scope, tableName, tableSettings)) - .map(o -> OptionConverters.toJava(o)); + .map(OptionConverters::toJava); } } diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/PravegaSettings.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/PravegaSettings.scala index c88d8e323..fc250570e 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/PravegaSettings.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/PravegaSettings.scala @@ -193,7 +193,8 @@ class WriterSettingsBuilder[Message]( f: EventWriterConfigBuilder => EventWriterConfigBuilder): WriterSettingsBuilder[Message] = copy(eventWriterConfigCustomizer = Some(f)) - def withClientConfig(clientConfig: ClientConfig) = copy(clientConfig = Some(clientConfig)) + def withClientConfig(clientConfig: ClientConfig): WriterSettingsBuilder[Message] = + copy(clientConfig = Some(clientConfig)) def clientConfigBuilder( clientConfigCustomization: ClientConfigBuilder => ClientConfigBuilder): WriterSettingsBuilder[Message] = @@ -282,7 +283,7 @@ object WriterSettingsBuilder { private def eventWriterConfig(readerConfig: Config): EventWriterConfigBuilder = { val builder = EventWriterConfig.builder() - implicit val config = readerConfig.getConfig("config") + implicit val config: Config = readerConfig.getConfig("config") extractBoolean("automatically-note-time")(builder.automaticallyNoteTime) extractInt("backoff-multiple")(builder.backoffMultiple) @@ -314,7 +315,8 @@ class TableReaderSettingsBuilder[K, V]( : TableReaderSettingsBuilder[K, V] = copy(keyValueTableClientConfigurationBuilderCustomizer = Some(f)) - def withClientConfigModifier(clientConfig: ClientConfig) = copy(clientConfig = Some(clientConfig)) + def withClientConfigModifier(clientConfig: ClientConfig): TableReaderSettingsBuilder[K, V] = + copy(clientConfig = Some(clientConfig)) def withMaximumInflightMessages(i: Int): TableReaderSettingsBuilder[K, V] = copy(maximumInflightMessages = i) @@ -440,7 +442,8 @@ class TableWriterSettingsBuilder[K, V]( : TableWriterSettingsBuilder[K, V] = copy(keyValueTableClientConfigurationBuilderCustomizer = Some(f)) - def withClientConfig(clientConfig: ClientConfig) = copy(clientConfig = Some(clientConfig)) + def withClientConfig(clientConfig: ClientConfig): TableWriterSettingsBuilder[K, V] = + copy(clientConfig = Some(clientConfig)) def clientConfigBuilder( clientConfigCustomization: ClientConfigBuilder => ClientConfigBuilder): TableWriterSettingsBuilder[K, V] = @@ -546,9 +549,9 @@ private[pravega] class ReaderBasicSetting( var groupName: Option[String] = None, var readerId: Option[String] = None, var timeout: Duration = 
Duration.ofSeconds(5)) { - def withGroupName(name: String) = groupName = Some(name) - def withReaderId(name: String) = readerId = Some(name) - def withTimeout(t: Duration) = timeout = t + def withGroupName(name: String): Unit = groupName = Some(name) + def withReaderId(name: String): Unit = readerId = Some(name) + def withTimeout(t: Duration): Unit = timeout = t } /** @@ -615,7 +618,7 @@ private[pravega] object ConfigHelper { val builder = ReaderConfig .builder() - implicit val c = config.getConfig("config") + implicit val c: Config = config.getConfig("config") extractBoolean("disable-time-windows")(builder.disableTimeWindows) extractLong("initial-allocation-delay")(builder.initialAllocationDelay) @@ -625,7 +628,7 @@ private[pravega] object ConfigHelper { def buildClientConfigFromTypeSafeConfig(config: Config): ClientConfig = { val builder = ClientConfig.builder() - implicit val c = config.getConfig("client-config") + implicit val c: Config = config.getConfig("client-config") extractString("controller-uri") { uri => builder.controllerURI(new URI(uri)) } diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaCapabilities.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaCapabilities.scala index 7b290fe81..0229b918f 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaCapabilities.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaCapabilities.scala @@ -27,12 +27,12 @@ import scala.util.{ Failure, Success, Try } protected val scope: String protected val clientConfig: ClientConfig - lazy val eventStreamClientFactory = EventStreamClientFactory.withScope(scope, clientConfig) + lazy val eventStreamClientFactory: EventStreamClientFactory = EventStreamClientFactory.withScope(scope, clientConfig) - def close() = Try(eventStreamClientFactory.close()) match { + def close(): Unit = Try(eventStreamClientFactory.close()) match { case Failure(exception) => log.error(exception, "Error while closing scope [{}]", scope) - case Success(value) => + case Success(_) => log.debug("Closed scope [{}]", scope) } diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaFlow.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaFlow.scala index 328b487a6..e980c0fcf 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaFlow.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaFlow.scala @@ -13,6 +13,8 @@ package org.apache.pekko.stream.connectors.pravega.impl +import io.pravega.client.ClientConfig + import java.util.concurrent.{ CompletableFuture, Semaphore } import org.apache.pekko import pekko.annotation.InternalApi @@ -38,7 +40,7 @@ import scala.util.{ Failure, Success, Try } private def in = shape.in private def out = shape.out - val clientConfig = writerSettings.clientConfig + val clientConfig: ClientConfig = writerSettings.clientConfig private var writer: EventStreamWriter[A] = _ @@ -109,7 +111,7 @@ import scala.util.{ Failure, Success, Try } Try(writer.close()) match { case Failure(exception) => log.error(exception, "Error while closing writer to stream [{}] in scope [{}}]", streamName, scope) - case Success(value) => + case Success(_) => log.debug("Closed writer to stream [{}] in scope [{}}]", streamName, scope) } close() diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaSource.scala 
b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaSource.scala index 715925c30..1550937ad 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaSource.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaSource.scala @@ -31,6 +31,7 @@ import pekko.stream.ActorAttributes import pekko.stream.stage.AsyncCallback import java.util.UUID +import scala.annotation.tailrec import scala.util.{ Failure, Success, Try } @InternalApi private final class PravegaSourcesStageLogic[A]( @@ -41,7 +42,7 @@ import scala.util.{ Failure, Success, Try } with PravegaCapabilities with StageLogging { - protected val scope = readerGroup.getScope + protected val scope: String = readerGroup.getScope override protected def logSource = classOf[PravegaSourcesStageLogic[A]] @@ -59,6 +60,7 @@ import scala.util.{ Failure, Success, Try } out, new OutHandler { + @tailrec override def onPull(): Unit = { val eventRead = reader.readNextEvent(readerSettings.timeout) if (eventRead.isCheckpoint) { @@ -83,7 +85,7 @@ import scala.util.{ Failure, Success, Try } startupPromise.success(Done) } catch { case NonFatal(exception) => - log.error(exception.getMessage()) + log.error(exception.getMessage) failStage(exception) } } diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableReadFlow.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableReadFlow.scala index 6e3e249b7..2361e9186 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableReadFlow.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableReadFlow.scala @@ -52,7 +52,7 @@ import scala.util.Success private var inFlight = 0 @volatile - private var upstreamEnded = false; + private var upstreamEnded = false private val asyncMessageSendCallback: AsyncCallback[Try[TableEntry]] = getAsyncCallback { p => p match { @@ -60,7 +60,7 @@ import scala.util.Success log.error(exception, s"Failed to send message {}") case Success(kv) => if (kv != null) - push(out, Some(tableSettings.valueSerializer.deserialize(kv.getValue()))) + push(out, Some(tableSettings.valueSerializer.deserialize(kv.getValue))) else push(out, None) @@ -69,7 +69,6 @@ import scala.util.Success if (inFlight == 0 && upstreamEnded) { log.info("Stage completed after upstream finish") completeStage() - } } diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableSource.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableSource.scala index e5d6ed852..39daa2cd4 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableSource.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableSource.scala @@ -13,7 +13,6 @@ package org.apache.pekko.stream.connectors.pravega.impl -import java.util.function.Consumer import org.apache.pekko import pekko.stream.stage.{ AsyncCallback, GraphStageLogic, GraphStageWithMaterializedValue, OutHandler, StageLogging } import pekko.stream.{ Attributes, Outlet, SourceShape } @@ -47,7 +46,8 @@ import io.pravega.common.util.AsyncIterator startupPromise: Promise[Done]) extends GraphStageLogic(shape) with StageLogging { - override protected def logSource = classOf[PravegaTableSourceStageLogic[K, V]] + override protected def logSource: Class[PravegaTableSourceStageLogic[K, V]] = + 
classOf[PravegaTableSourceStageLogic[K, V]] private def out = shape.out @@ -60,16 +60,12 @@ import io.pravega.common.util.AsyncIterator private var closing = false - val logThat: AsyncCallback[String] = getAsyncCallback { message => - log.info(message) - } - - private def pushElement(out: Outlet[TableEntry[V]], element: TableEntry[V]) = { + private def pushElement(out: Outlet[TableEntry[V]], element: TableEntry[V]): Unit = { push(out, element) semaphore.release() } - val onElement: AsyncCallback[TableEntry[V]] = getAsyncCallback[TableEntry[V]] { element => + private val onElement: AsyncCallback[TableEntry[V]] = getAsyncCallback[TableEntry[V]] { element => if (isAvailable(out) && queue.isEmpty) pushElement(out, element) else @@ -77,7 +73,7 @@ import io.pravega.common.util.AsyncIterator } - val onFinish: AsyncCallback[Unit] = getAsyncCallback[Unit] { _ => + private val onFinish: AsyncCallback[Unit] = getAsyncCallback[Unit] { _ => closing = true if (queue.isEmpty) completeStage() @@ -88,30 +84,28 @@ import io.pravega.common.util.AsyncIterator out, new OutHandler { override def onPull(): Unit = { - if (!queue.isEmpty) + if (queue.nonEmpty) pushElement(out, queue.dequeue()) if (closing && queue.isEmpty) completeStage() } }) - def nextIteration(iterator: AsyncIterator[IteratorItem[JTableEntry]]): Unit = + private def nextIteration(iterator: AsyncIterator[IteratorItem[JTableEntry]]): Unit = iterator.getNext - .thenAccept(new Consumer[IteratorItem[JTableEntry]] { - override def accept(iteratorItem: IteratorItem[JTableEntry]): Unit = { - if (iteratorItem == null) { - onFinish.invoke(()) - } else { - iteratorItem.getItems.stream().forEach { tableEntry => - semaphore.acquire() - - val entry = new TableEntry(tableEntry.getKey(), - tableEntry.getVersion(), - tableReaderSettings.valueSerializer.deserialize(tableEntry.getValue())) - onElement.invoke(entry) - } - nextIteration(iterator) + .thenAccept((iteratorItem: IteratorItem[JTableEntry]) => { + if (iteratorItem == null) + onFinish.invoke(()) + else { + iteratorItem.getItems.stream().forEach { tableEntry => + semaphore.acquire() + + val entry = new TableEntry(tableEntry.getKey, + tableEntry.getVersion, + tableReaderSettings.valueSerializer.deserialize(tableEntry.getValue)) + onElement.invoke(entry) } + nextIteration(iterator) } }) @@ -133,7 +127,7 @@ import io.pravega.common.util.AsyncIterator startupPromise.success(Done) } catch { case NonFatal(exception) => - log.error(exception.getMessage()) + log.error(exception.getMessage) failStage(exception) } } diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableWriteFlow.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableWriteFlow.scala index 3e45c5446..f8ccb5c90 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableWriteFlow.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableWriteFlow.scala @@ -69,7 +69,6 @@ import io.pravega.client.tables.TableKey if (onAir.decrementAndGet == 0 && upstreamEnded) { log.debug("Stage completed after upstream finish") completeStage() - } } @@ -137,7 +136,7 @@ import io.pravega.client.tables.TableKey Try(table.close()) match { case Failure(exception) => log.error(exception, "Error while closing table [{}]", tableName) - case Success(value) => + case Success(_) => log.debug("Closed table [{}]", tableName) } keyValueTableFactory.close() @@ -145,7 +144,7 @@ import io.pravega.client.tables.TableKey } @InternalApi 
private[pravega] final class PravegaTableWriteFlow[KVPair, K, V]( - kvpToTuple2: KVPair => Tuple2[K, V], + kvpToTuple2: KVPair => (K, V), scope: String, streamName: String, tableWriterSettings: TableWriterSettings[K, V]) extends GraphStage[FlowShape[KVPair, KVPair]] { diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaWriter.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaWriter.scala index bd34d7dc2..6fa0853d5 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaWriter.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaWriter.scala @@ -13,6 +13,7 @@ package org.apache.pekko.stream.connectors.pravega.impl +import io.pravega.client.stream.EventStreamWriter import org.apache.pekko import pekko.annotation.InternalApi import pekko.stream.connectors.pravega.WriterSettings @@ -20,7 +21,7 @@ import pekko.stream.stage.StageLogging @InternalApi private[pravega] trait PravegaWriter extends PravegaCapabilities { this: StageLogging => - def createWriter[A](streamName: String, writerSettings: WriterSettings[A]) = + def createWriter[A](streamName: String, writerSettings: WriterSettings[A]): EventStreamWriter[A] = eventStreamClientFactory.createEventWriter( streamName, writerSettings.serializer, diff --git a/pravega/src/test/java/docs/javadsl/PravegaReadWriteDocs.java b/pravega/src/test/java/docs/javadsl/PravegaReadWriteDocs.java index 90edeb96b..03bf9ecad 100644 --- a/pravega/src/test/java/docs/javadsl/PravegaReadWriteDocs.java +++ b/pravega/src/test/java/docs/javadsl/PravegaReadWriteDocs.java @@ -115,7 +115,7 @@ public Integer deserialize(ByteBuffer serializedValue) { final CompletionStage pair = PravegaTable.source("an_existing_scope", "an_existing_tableName", tableReaderSettings) - .to(Sink.foreach((TableEntry kvp) -> processKVP(kvp))) + .to(Sink.foreach(PravegaReadWriteDocs::processKVP)) .run(system); // #table-reading diff --git a/pravega/src/test/java/org/apache/pekko/stream/connectors/pravega/PravegaGraphTestCase.java b/pravega/src/test/java/org/apache/pekko/stream/connectors/pravega/PravegaGraphTestCase.java index 677f7f446..6789c2e8d 100644 --- a/pravega/src/test/java/org/apache/pekko/stream/connectors/pravega/PravegaGraphTestCase.java +++ b/pravega/src/test/java/org/apache/pekko/stream/connectors/pravega/PravegaGraphTestCase.java @@ -95,7 +95,7 @@ public void infiniteSourceTest() Pair> pair = Pravega.source(readerGroup, readerSettings) - .map(e -> e.message()) + .map(PravegaEvent::message) .viaMat(KillSwitches.single(), Keep.right()) .toMat( Sink.fold( diff --git a/pravega/src/test/java/org/apache/pekko/stream/connectors/pravega/PravegaKVTableTestCase.java b/pravega/src/test/java/org/apache/pekko/stream/connectors/pravega/PravegaKVTableTestCase.java index 1e5c55d9a..d068ec1d9 100644 --- a/pravega/src/test/java/org/apache/pekko/stream/connectors/pravega/PravegaKVTableTestCase.java +++ b/pravega/src/test/java/org/apache/pekko/stream/connectors/pravega/PravegaKVTableTestCase.java @@ -99,14 +99,13 @@ public void writeAndReadInKVTable() Sink.fold( "", (acc, p) -> { - if (acc == "") return p.value(); + if (acc.isEmpty()) return p.value(); return acc + ", " + p.value(); }), system); String result = readingDone.toCompletableFuture().get(timeoutSeconds, TimeUnit.SECONDS); - Assert.assertTrue( - String.format("Read 2 elements [%s]", result), result.equals("One, Two, Three, Four")); + Assert.assertEquals(String.format("Read 2 elements [%s]", result), 
"One, Two, Three, Four", result); Flow, NotUsed> readFlow = PravegaTable.readFlow(scope, tableName, tableReaderSettings); diff --git a/pravega/src/test/scala/docs/scaladsl/Model.scala b/pravega/src/test/scala/docs/scaladsl/Model.scala index a619dbcfb..7040849cd 100644 --- a/pravega/src/test/scala/docs/scaladsl/Model.scala +++ b/pravega/src/test/scala/docs/scaladsl/Model.scala @@ -13,4 +13,4 @@ package docs.scaladsl -case class Person(id: Int, firstname: String) +final case class Person(id: Int, firstname: String) diff --git a/pravega/src/test/scala/org/apache/pekko/stream/connectors/pravega/PravegaBaseSpec.scala b/pravega/src/test/scala/org/apache/pekko/stream/connectors/pravega/PravegaBaseSpec.scala index 9cdc7de68..3351a7762 100644 --- a/pravega/src/test/scala/org/apache/pekko/stream/connectors/pravega/PravegaBaseSpec.scala +++ b/pravega/src/test/scala/org/apache/pekko/stream/connectors/pravega/PravegaBaseSpec.scala @@ -34,7 +34,7 @@ abstract class PravegaBaseSpec with AnyWordSpecLike with ScalaFutures with Matchers { - val logger = LoggerFactory.getLogger(this.getClass()) + val logger = LoggerFactory.getLogger(this.getClass) def time[R](label: String, block: => R): R = { val t0 = System.nanoTime() / 1000000 @@ -49,7 +49,7 @@ abstract class PravegaBaseSpec def newKeyValueTableName() = "scala-test-kv-table" + UUID.randomUUID().toString - def createStream(scope: String, streamName: String) = { + def createStream(scope: String, streamName: String): Unit = { val streamManager = StreamManager.create(URI.create("tcp://localhost:9090")) if (streamManager.createScope(scope)) logger.info(s"Created scope [$scope].") @@ -69,9 +69,8 @@ abstract class PravegaBaseSpec val streamManager = StreamManager.create(URI.create("tcp://localhost:9090")) if (streamManager.createScope(scope)) logger.info(s"Created scope [$scope].") - else { + else logger.info(s"Scope [$scope] already exists.") - } streamManager.close() val clientConfig = ClientConfig .builder() diff --git a/pravega/src/test/scala/org/apache/pekko/stream/connectors/pravega/PravegaKVTableSpec.scala b/pravega/src/test/scala/org/apache/pekko/stream/connectors/pravega/PravegaKVTableSpec.scala index 34d183e9c..1747dcec2 100644 --- a/pravega/src/test/scala/org/apache/pekko/stream/connectors/pravega/PravegaKVTableSpec.scala +++ b/pravega/src/test/scala/org/apache/pekko/stream/connectors/pravega/PravegaKVTableSpec.scala @@ -56,7 +56,7 @@ class PravegaKVTableSpec extends PravegaBaseSpec with Repeated { val readingDone = PravegaTable .source(scope, tableName, tableSettings) - .toMat(Sink.fold(0) { (sum, value) => + .toMat(Sink.fold(0) { (sum, _) => sum + 1 })(Keep.right) .run() diff --git a/project/Common.scala b/project/Common.scala index 5bbed001c..1da453dcd 100644 --- a/project/Common.scala +++ b/project/Common.scala @@ -81,11 +81,10 @@ object Common extends AutoPlugin { "-doc-canonical-base-url", "https://pekko.apache.org/api/pekko-connectors/current/"), Compile / doc / scalacOptions ++= { - if (isScala3.value) { + if (isScala3.value) Seq("-skip-packages:" + packagesToSkip) - } else { + else Seq("-skip-packages", packagesToSkip) - } }, Compile / doc / scalacOptions -= "-Werror", compile / javacOptions ++= Seq( diff --git a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala index 461fbe92f..60ef55d05 100644 --- a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala +++ 
b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala @@ -25,6 +25,7 @@ import pekko.actor.{ import pekko.stream.scaladsl.Flow import pekko.util.ByteString import com.typesafe.config.Config +import org.apache.pekko.NotUsed /** * Some connectors might require an external resource that is used in the @@ -41,18 +42,18 @@ import com.typesafe.config.Config */ final class Resource private (val settings: ResourceSettings) { // a resource that is to be used when creating Pekko Stream operators. - val connection = Flow[ByteString].map(_.reverse) + val connection: Flow[ByteString, ByteString, NotUsed] = Flow[ByteString].map(_.reverse) /** * Resource cleanup logic */ - def cleanup() = {} + def cleanup(): Unit = {} } object Resource { - def apply(settings: ResourceSettings) = new Resource(settings) + def apply(settings: ResourceSettings): Resource = new Resource(settings) - def create(settings: ResourceSettings) = Resource(settings) + def create(settings: ResourceSettings): Resource = Resource(settings) } /** @@ -68,7 +69,7 @@ final class ResourceSettings private (val msg: String) { * instance for reading values from HOCON. */ object ResourceSettings { - val ConfigPath = "pekko.connectors.reference" + val ConfigPath: String = "pekko.connectors.reference" def apply(msg: String): ResourceSettings = new ResourceSettings(msg) @@ -122,7 +123,7 @@ final class ResourceExt private (sys: ExtendedActorSystem) extends Extension { } object ResourceExt extends ExtensionId[ResourceExt] with ExtensionIdProvider { - override def lookup = ResourceExt + override def lookup: ResourceExt.type = ResourceExt override def createExtension(system: ExtendedActorSystem) = new ResourceExt(system) /** diff --git a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/attributes.scala b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/attributes.scala index b33a3b338..87826e4eb 100644 --- a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/attributes.scala +++ b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/attributes.scala @@ -23,7 +23,7 @@ object ReferenceAttributes { /** * Wrap a `Resource` to an attribute so it can be attached to a stream stage. 
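 * For example, a minimal sketch (the `ByteString`-reversing stage is illustrative;
 * the attribute is attached through the standard `addAttributes` operator):
 * {{{
 * val resource = Resource(ResourceSettings("example message"))
 * val stage = Flow[ByteString]
 *   .map(_.reverse)
 *   .addAttributes(ReferenceAttributes.resource(resource))
 * }}}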
*/ - def resource(resource: Resource) = Attributes(new ReferenceResourceValue(resource)) + def resource(resource: Resource): Attributes = Attributes(new ReferenceResourceValue(resource)) } final class ReferenceResourceValue @InternalApi private[reference] (val resource: Resource) extends Attribute diff --git a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/impl/ReferenceFlowStage.scala b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/impl/ReferenceFlowStage.scala index d6db7559b..60b67730f 100644 --- a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/impl/ReferenceFlowStage.scala +++ b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/impl/ReferenceFlowStage.scala @@ -62,7 +62,7 @@ import pekko.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } /** * INTERNAL API */ -@InternalApi private[reference] final class ReferenceFlowStage() +@InternalApi private[reference] final class ReferenceFlowStage extends GraphStage[FlowShape[ReferenceWriteMessage, ReferenceWriteResult]] { val in: Inlet[ReferenceWriteMessage] = Inlet(Logging.simpleName(this) + ".in") val out: Outlet[ReferenceWriteResult] = Outlet(Logging.simpleName(this) + ".out") diff --git a/reference/src/test/java/docs/javadsl/ReferenceTest.java b/reference/src/test/java/docs/javadsl/ReferenceTest.java index d8a4b466d..698709974 100644 --- a/reference/src/test/java/docs/javadsl/ReferenceTest.java +++ b/reference/src/test/java/docs/javadsl/ReferenceTest.java @@ -112,7 +112,7 @@ public void runFlow() throws Exception { new HashMap() { { put("rps", 20L); - put("rpm", Long.valueOf(30L)); + put("rpm", 30L); } }; diff --git a/reference/src/test/scala/docs/scaladsl/ReferenceSpec.scala b/reference/src/test/scala/docs/scaladsl/ReferenceSpec.scala index f8a2c19c2..fd910462a 100644 --- a/reference/src/test/scala/docs/scaladsl/ReferenceSpec.scala +++ b/reference/src/test/scala/docs/scaladsl/ReferenceSpec.scala @@ -139,7 +139,7 @@ class ReferenceSpec extends AnyWordSpec with BeforeAndAfterAll with ScalaFutures } - override def afterAll() = + override def afterAll(): Unit = TestKit.shutdownActorSystem(system) } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Attributes.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Attributes.scala index fe53015e8..f93143cc6 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Attributes.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Attributes.scala @@ -37,10 +37,10 @@ final class S3SettingsPath private (val path: String) extends Attribute object S3SettingsPath { val Default: S3SettingsPath = S3SettingsPath(S3Settings.ConfigPath) - def apply(path: String) = new S3SettingsPath(path) + def apply(path: String): S3SettingsPath = new S3SettingsPath(path) } final class S3SettingsValue private (val settings: S3Settings) extends Attribute object S3SettingsValue { - def apply(settings: S3Settings) = new S3SettingsValue(settings) + def apply(settings: S3Settings): S3SettingsValue = new S3SettingsValue(settings) } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Exception.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Exception.scala index 5478f065b..2f749e70a 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Exception.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Exception.scala @@ -46,7 +46,7 @@ object S3Exception { (xmlResponse \ "RequestId").text, 
(xmlResponse \ "Resource").text) } catch { - case e: Exception => + case _: Exception => new S3Exception(statusCode, statusCode.toString, response, "-", "-") } } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Ext.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Ext.scala index 1c209382d..1fad03d86 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Ext.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Ext.scala @@ -27,7 +27,7 @@ final class S3Ext private (sys: ExtendedActorSystem) extends Extension { object S3Ext extends ExtensionId[S3Ext] with ExtensionIdProvider { override def lookup: S3Ext.type = S3Ext - override def createExtension(system: ExtendedActorSystem) = new S3Ext(system) + override def createExtension(system: ExtendedActorSystem): S3Ext = new S3Ext(system) /** * Java API. diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Headers.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Headers.scala index 7ab95395b..22fdd3473 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Headers.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/S3Headers.scala @@ -51,7 +51,7 @@ final class MetaHeaders private (val metaHeaders: Map[String, String]) { } object MetaHeaders { - def apply(metaHeaders: Map[String, String]) = + def apply(metaHeaders: Map[String, String]): MetaHeaders = new MetaHeaders(metaHeaders) def create(metaHeaders: java.util.Map[String, String]): MetaHeaders = diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/headers/ServerSideEncryption.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/headers/ServerSideEncryption.scala index 8d2dd5e6c..a25d65f4e 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/headers/ServerSideEncryption.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/headers/ServerSideEncryption.scala @@ -45,7 +45,7 @@ object ServerSideEncryption { new CustomerKeys(key) } -final class AES256 private[headers] () extends ServerSideEncryption { +final class AES256 private[headers] extends ServerSideEncryption { @InternalApi private[s3] override def headers: immutable.Seq[HttpHeader] = RawHeader("x-amz-server-side-encryption", "AES256") :: Nil @@ -59,8 +59,8 @@ final class AES256 private[headers] () extends ServerSideEncryption { ")" override def equals(other: Any): Boolean = other match { - case that: AES256 => true - case _ => false + case _: AES256 => true + case _ => false } override def hashCode(): Int = diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/headers/StorageClass.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/headers/StorageClass.scala index b19298bd8..adf5c304c 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/headers/StorageClass.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/headers/StorageClass.scala @@ -26,8 +26,8 @@ final class StorageClass private (val storageClass: String) { } object StorageClass { - val Standard = new StorageClass("STANDARD") - val InfrequentAccess = new StorageClass("STANDARD_IA") - val Glacier = new StorageClass("GLACIER") - val ReducedRedundancy = new StorageClass("REDUCED_REDUNDANCY") + val Standard: StorageClass = new StorageClass("STANDARD") + val InfrequentAccess: StorageClass = new StorageClass("STANDARD_IA") + val Glacier: StorageClass = new StorageClass("GLACIER") + val ReducedRedundancy: StorageClass = new 
StorageClass("REDUCED_REDUNDANCY") } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/DiskBuffer.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/DiskBuffer.scala index 3c4ec2d7e..80df9e09f 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/DiskBuffer.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/DiskBuffer.scala @@ -52,11 +52,11 @@ import pekko.annotation.InternalApi require(maxMaterializations > 0, "maxMaterializations should be at least 1") require(maxSize > 0, "maximumSize should be at least 1") - val in = Inlet[ByteString]("DiskBuffer.in") - val out = Outlet[Chunk]("DiskBuffer.out") - override val shape = FlowShape.of(in, out) + val in: Inlet[ByteString] = Inlet[ByteString]("DiskBuffer.in") + val out: Outlet[Chunk] = Outlet[Chunk]("DiskBuffer.out") + override val shape: FlowShape[ByteString, Chunk] = FlowShape.of(in, out) - override def initialAttributes = + override def initialAttributes: Attributes = super.initialAttributes and Attributes.name("DiskBuffer") and ActorAttributes.IODispatcher override def createLogic(attr: Attributes): GraphStageLogic = @@ -74,9 +74,8 @@ import pekko.annotation.InternalApi override def onPush(): Unit = { val elem = grab(in) length += elem.size - if (length > maxSize) { + if (length > maxSize) throw new BufferOverflowException() - } pathOut.write(elem.toArray) pull(in) @@ -91,7 +90,7 @@ import pekko.annotation.InternalApi // close stream even if we didn't emit try { pathOut.close() - } catch { case x: Throwable => () } + } catch { case _: Throwable => () } private def emit(): Unit = { pathOut.close() @@ -101,7 +100,6 @@ import pekko.annotation.InternalApi if (deleteCounter.decrementAndGet() <= 0) f.onComplete { _ => path.delete() - }(ExecutionContexts.parasitic) NotUsed } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequests.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequests.scala index e3d489738..0c0f1f582 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequests.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequests.scala @@ -384,8 +384,7 @@ import scala.xml.NodeSeq val fixedPath = rawPath.replaceAll("\\+", "%2B") require(rawUri.startsWith(rawPath)) fixedPath + rawUri.drop(rawPath.length) - } else { + } else rawUri - } } } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/Marshalling.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/Marshalling.scala index 4e9cacef5..98f3fcc18 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/Marshalling.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/Marshalling.scala @@ -67,8 +67,8 @@ import scala.xml.NodeSeq } } - val isTruncated = "IsTruncated" - val apiV2ContinuationToken = "NextContinuationToken" + private val isTruncated = "IsTruncated" + private val apiV2ContinuationToken = "NextContinuationToken" implicit val listBucketResultUnmarshaller: FromEntityUnmarshaller[ListBucketResult] = { nodeSeqUnmarshaller(MediaTypes.`application/xml`.withCharset(HttpCharsets.`UTF-8`)).map { diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/MemoryBuffer.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/MemoryBuffer.scala index 682116f68..89e9c1f3b 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/MemoryBuffer.scala +++ 
b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/MemoryBuffer.scala @@ -30,9 +30,9 @@ import pekko.util.ByteString * @param maxSize Maximum size to buffer */ @InternalApi private[impl] final class MemoryBuffer(maxSize: Int) extends GraphStage[FlowShape[ByteString, Chunk]] { - val in = Inlet[ByteString]("MemoryBuffer.in") - val out = Outlet[Chunk]("MemoryBuffer.out") - override val shape = FlowShape.of(in, out) + val in: Inlet[ByteString] = Inlet[ByteString]("MemoryBuffer.in") + val out: Outlet[Chunk] = Outlet[Chunk]("MemoryBuffer.out") + override val shape: FlowShape[ByteString, Chunk] = FlowShape.of(in, out) override def createLogic(attr: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { @@ -42,9 +42,9 @@ import pekko.util.ByteString override def onPush(): Unit = { val elem = grab(in) - if (buffer.size + elem.size > maxSize) { + if (buffer.size + elem.size > maxSize) failStage(new IllegalStateException("Buffer size of " + maxSize + " bytes exceeded.")) - } else { + else { buffer ++= elem pull(in) } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/MemoryWithContext.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/MemoryWithContext.scala index e58d09818..7184b513b 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/MemoryWithContext.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/MemoryWithContext.scala @@ -35,9 +35,9 @@ import scala.collection.mutable.ListBuffer */ @InternalApi private[impl] final class MemoryWithContext[C](maxSize: Int) extends GraphStage[FlowShape[(ByteString, C), (Chunk, immutable.Iterable[C])]] { - val in = Inlet[(ByteString, C)]("MemoryBuffer.in") - val out = Outlet[(Chunk, immutable.Iterable[C])]("MemoryBuffer.out") - override val shape = FlowShape.of(in, out) + val in: Inlet[(ByteString, C)] = Inlet[(ByteString, C)]("MemoryBuffer.in") + val out: Outlet[(Chunk, immutable.Iterable[C])] = Outlet[(Chunk, immutable.Iterable[C])]("MemoryBuffer.out") + override val shape: FlowShape[(ByteString, C), (Chunk, immutable.Iterable[C])] = FlowShape.of(in, out) override def createLogic(attr: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { @@ -48,9 +48,9 @@ import scala.collection.mutable.ListBuffer override def onPush(): Unit = { val (elem, context) = grab(in) - if (buffer.size + elem.size > maxSize) { + if (buffer.size + elem.size > maxSize) failStage(new IllegalStateException("Buffer size of " + maxSize + " bytes exceeded.")) - } else { + else { buffer ++= elem // This is a corner case where context can have a sentinel value of null which represents the initial empty // stream. 
We don't want to add null's into the final output diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala index c43070fab..3303035f6 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala @@ -237,22 +237,22 @@ import scala.util.{ Failure, Success, Try } /** * An ADT that represents the current state of pagination */ - sealed trait S3PaginationState[T] + private sealed trait S3PaginationState[T] - final case class Starting[T]() extends S3PaginationState[T] + private final case class Starting[T]() extends S3PaginationState[T] /** * S3 typically does pagination by the use of a continuation token which is a unique pointer that is * provided upon each page request that when provided for the next request, only retrieves results * **after** that token */ - final case class Running[T](continuationToken: T) extends S3PaginationState[T] + private final case class Running[T](continuationToken: T) extends S3PaginationState[T] - final case class Finished[T]() extends S3PaginationState[T] + private final case class Finished[T]() extends S3PaginationState[T] - type ListBucketState = S3PaginationState[String] + private type ListBucketState = S3PaginationState[String] - def listBucketCall[T]( + private def listBucketCall[T]( bucket: String, prefix: Option[String], delimiter: Option[String], @@ -346,9 +346,9 @@ import scala.util.{ Failure, Success, Try } } .mapMaterializedValue(_ => NotUsed) - type ListMultipartUploadState = S3PaginationState[ListMultipartUploadContinuationToken] + private type ListMultipartUploadState = S3PaginationState[ListMultipartUploadContinuationToken] - def listMultipartUploadCall[T]( + private def listMultipartUploadCall[T]( bucket: String, prefix: Option[String], delimiter: Option[String], @@ -424,9 +424,9 @@ import scala.util.{ Failure, Success, Try } .mapMaterializedValue(_ => NotUsed) } - type ListPartsState = S3PaginationState[Int] + private type ListPartsState = S3PaginationState[Int] - def listPartsCall[T]( + private def listPartsCall[T]( bucket: String, key: String, uploadId: String, @@ -470,9 +470,9 @@ import scala.util.{ Failure, Success, Try } .mapMaterializedValue(_ => NotUsed) } - type ListObjectVersionsState = S3PaginationState[ListObjectVersionContinuationToken] + private type ListObjectVersionsState = S3PaginationState[ListObjectVersionContinuationToken] - def listObjectVersionsCall[T]( + private def listObjectVersionsCall[T]( bucket: String, delimiter: Option[String], prefix: Option[String], @@ -730,7 +730,6 @@ import scala.util.{ Failure, Success, Try } bucket = bucket, method = HttpMethods.PUT, httpRequest = bucketManagementRequest(bucket), - headers.headersFor(MakeBucket), process = processS3LifecycleResponse, httpEntity = maybeRegionPayload) } @@ -745,7 +744,6 @@ import scala.util.{ Failure, Success, Try } bucket = bucket, method = HttpMethods.DELETE, httpRequest = bucketManagementRequest(bucket), - headers.headersFor(DeleteBucket), process = processS3LifecycleResponse) def deleteBucket(bucket: String, headers: S3Headers)(implicit mat: Materializer, attr: Attributes): Future[Done] = @@ -756,7 +754,6 @@ import scala.util.{ Failure, Success, Try } bucket = bucketName, method = HttpMethods.HEAD, httpRequest = bucketManagementRequest(bucketName), - headers.headersFor(CheckBucket), process = processCheckIfExistsResponse) def 
checkIfBucketExists(bucket: String, headers: S3Headers)(implicit mat: Materializer, @@ -772,7 +769,6 @@ import scala.util.{ Failure, Success, Try } bucket = bucket, method = HttpMethods.DELETE, httpRequest = uploadManagementRequest(bucket, key, uploadId), - headers.headersFor(DeleteBucket), process = processS3LifecycleResponse) def deleteUpload(bucket: String, key: String, uploadId: String, headers: S3Headers)(implicit mat: Materializer, @@ -790,7 +786,6 @@ import scala.util.{ Failure, Success, Try } bucket = bucket, method = HttpMethods.PUT, httpRequest = bucketVersioningRequest(bucket, bucketVersioning.mfaDelete, headers), - headers.headersFor(PutBucketVersioning), process = processS3LifecycleResponse, httpEntity = Some(putBucketVersioningPayload(bucketVersioning)(ExecutionContexts.parasitic))) @@ -805,7 +800,6 @@ import scala.util.{ Failure, Success, Try } bucket = bucket, method = HttpMethods.GET, httpRequest = bucketVersioningRequest(bucket, None, headers), - headers.headersFor(GetBucketVersioning), process = { (response: HttpResponse, mat: Materializer) => response match { case HttpResponse(status, _, entity, _) if status.isSuccess() => @@ -824,7 +818,6 @@ import scala.util.{ Failure, Success, Try } bucket: String, method: HttpMethod, httpRequest: (HttpMethod, S3Settings) => HttpRequest, - headers: Seq[HttpHeader], process: (HttpResponse, Materializer) => Future[T], httpEntity: Option[Future[RequestEntity]] = None): Source[T, NotUsed] = Source @@ -898,7 +891,7 @@ import scala.util.{ Failure, Success, Try } chunkSize: Int = MinChunkSize, chunkingParallelism: Int = 4): Sink[ByteString, Future[MultipartUploadResult]] = chunkAndRequest(s3Location, contentType, s3Headers, chunkSize)(chunkingParallelism) - .toMat(completionSink(s3Location, s3Headers))(Keep.right) + .toMat(completionSink(s3Headers))(Keep.right) /** * Uploads a stream of ByteStrings along with a context to a specified location as a multipart upload. 
The @@ -912,7 +905,7 @@ import scala.util.{ Failure, Success, Try } chunkSize: Int = MinChunkSize, chunkingParallelism: Int = 4): Sink[(ByteString, C), Future[MultipartUploadResult]] = chunkAndRequestWithContext[C](s3Location, contentType, s3Headers, chunkSize, chunkUploadSink)(chunkingParallelism) - .toMat(completionSink(s3Location, s3Headers))(Keep.right) + .toMat(completionSink(s3Headers))(Keep.right) /** * Resumes a previously created a multipart upload by uploading a stream of ByteStrings to a specified location @@ -931,7 +924,7 @@ import scala.util.{ Failure, Success, Try } } chunkAndRequest(s3Location, contentType, s3Headers, chunkSize, initialUpload)(chunkingParallelism) .prepend(Source(successfulParts)) - .toMat(completionSink(s3Location, s3Headers))(Keep.right) + .toMat(completionSink(s3Headers))(Keep.right) } /** @@ -954,7 +947,7 @@ import scala.util.{ Failure, Success, Try } } chunkAndRequestWithContext[C](s3Location, contentType, s3Headers, chunkSize, chunkUploadSink, initialUpload)( chunkingParallelism).prepend(Source(successfulParts)) - .toMat(completionSink(s3Location, s3Headers))(Keep.right) + .toMat(completionSink(s3Headers))(Keep.right) } def completeMultipartUpload( @@ -966,7 +959,7 @@ import scala.util.{ Failure, Success, Try } SuccessfulUploadPart(MultipartUpload(s3Location.bucket, s3Location.key, uploadId), part.partNumber, part.eTag) } Source(successfulParts) - .toMat(completionSink(s3Location, s3Headers).withAttributes(attr))(Keep.right) + .toMat(completionSink(s3Headers).withAttributes(attr))(Keep.right) .run() } @@ -1018,7 +1011,7 @@ import scala.util.{ Failure, Success, Try } // The individual copy upload part requests are processed here processUploadCopyPartRequests(copyRequests)(chunkingParallelism) - .toMat(completionSink(targetLocation, s3Headers))(Keep.right) + .toMat(completionSink(s3Headers))(Keep.right) } private def computeMetaData(headers: immutable.Seq[HttpHeader], entity: ResponseEntity): ObjectMetadata = @@ -1031,7 +1024,7 @@ import scala.util.{ Failure, Success, Try } // `Content-Type` header is by design not accessible as header. 
So need to have a custom // header implementation to expose that - private case class CustomContentTypeHeader(contentType: ContentType) extends CustomHeader { + private final case class CustomContentTypeHeader(contentType: ContentType) extends CustomHeader { override def name(): String = "Content-Type" override def value(): String = contentType.value @@ -1041,7 +1034,7 @@ import scala.util.{ Failure, Success, Try } override def renderInResponses(): Boolean = true } - private def completeMultipartUpload(s3Location: S3Location, + private def completeMultipartUpload( parts: immutable.Seq[SuccessfulUploadPart], s3Headers: S3Headers)( implicit mat: Materializer, @@ -1090,7 +1083,7 @@ import scala.util.{ Failure, Success, Try } ConnectionPoolSettings(system).withConnectionSettings(ClientConnectionSettings(system).withTransport(transport)) }) - private case class ChangeTargetEndpointTransport(address: InetSocketAddress) extends ClientTransport { + private final case class ChangeTargetEndpointTransport(address: InetSocketAddress) extends ClientTransport { def connectTo(ignoredHost: String, ignoredPort: Int, settings: ClientConnectionSettings)( implicit system: ActorSystem): Flow[ByteString, ByteString, Future[OutgoingConnection]] = Tcp() @@ -1118,7 +1111,7 @@ import scala.util.{ Failure, Success, Try } initialUploadState: Option[(String, Int)] = None)( parallelism: Int): Flow[ByteString, UploadPartResponse, NotUsed] = { - def getChunkBuffer(chunkSize: Int, bufferSize: Int, maxRetriesPerChunk: Int)(implicit settings: S3Settings) = + def getChunkBuffer(bufferSize: Int, maxRetriesPerChunk: Int)(implicit settings: S3Settings) = settings.bufferType match { case MemoryBufferType => new MemoryBuffer(bufferSize) @@ -1135,7 +1128,7 @@ import scala.util.{ Failure, Success, Try } assert( chunkSize >= MinChunkSize, - s"Chunk size must be at least 5 MB = $MinChunkSize bytes (was $chunkSize bytes). See http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html") + s"Chunk size must be at least 5 MB = $MinChunkSize bytes (was $chunkSize bytes). See https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html") val chunkBufferSize = chunkSize * 2 @@ -1153,11 +1146,10 @@ import scala.util.{ Failure, Success, Try } .prefixAndTail(1) .flatMapConcat { case (prefix, tail) => - if (prefix.nonEmpty) { + if (prefix.nonEmpty) Source(prefix).concat(tail) - } else { + else Source.single(MemoryChunk(ByteString.empty)) - } } val retriableFlow: Flow[(Chunk, (MultipartUpload, Int)), (Try[HttpResponse], (MultipartUpload, Int)), NotUsed] = @@ -1178,7 +1170,7 @@ import scala.util.{ Failure, Success, Try } import conf.multipartUploadSettings.retrySettings._ SplitAfterSize(chunkSize, chunkBufferSize)(atLeastOneByteString) - .via(getChunkBuffer(chunkSize, chunkBufferSize, maxRetries)) // creates the chunks + .via(getChunkBuffer(chunkBufferSize, maxRetries)) // creates the chunks .mergeSubstreamsWithParallelism(parallelism) .filter(_.size > 0) .via(atLeastOne) @@ -1225,7 +1217,7 @@ import scala.util.{ Failure, Success, Try } assert( chunkSize >= MinChunkSize, - s"Chunk size must be at least 5 MB = $MinChunkSize bytes (was $chunkSize bytes). See http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html") + s"Chunk size must be at least 5 MB = $MinChunkSize bytes (was $chunkSize bytes). 
See https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html") val chunkBufferSize = chunkSize * 2 @@ -1243,11 +1235,10 @@ import scala.util.{ Failure, Success, Try } .prefixAndTail(1) .flatMapConcat { case (prefix, tail) => - if (prefix.nonEmpty) { + if (prefix.nonEmpty) Source(prefix).concat(tail) - } else { + else Source.single((MemoryChunk(ByteString.empty), immutable.Iterable.empty)) - } } val retriableFlow: Flow[((Chunk, (MultipartUpload, Int)), immutable.Iterable[C]), ((Try[HttpResponse], ( @@ -1301,9 +1292,8 @@ import scala.util.{ Failure, Success, Try } if (isTransientError(r.status)) { r.entity.discardBytes() Some((chunkAndUploadInfo, allContext)) - } else { + } else None - } case ((chunkAndUploadInfo, allContext), ((Failure(_), _), _)) => // Treat any exception as transient. Some((chunkAndUploadInfo, allContext)) @@ -1376,7 +1366,6 @@ import scala.util.{ Failure, Success, Try } } private def completionSink( - s3Location: S3Location, s3Headers: S3Headers): Sink[UploadPartResponse, Future[MultipartUploadResult]] = Sink .fromMaterializer { (mat, attr) => @@ -1390,15 +1379,14 @@ import scala.util.{ Failure, Success, Try } .flatMap { (responses: immutable.Seq[UploadPartResponse]) => val successes = responses.collect { case r: SuccessfulUploadPart => r } val failures = responses.collect { case r: FailedUploadPart => r } - if (responses.isEmpty) { + if (responses.isEmpty) Future.failed(new RuntimeException("No Responses")) - } else if (failures.isEmpty) { + else if (failures.isEmpty) Future.successful(successes.sortBy(_.partNumber)) - } else { + else Future.failed(FailedUpload(failures.map(_.exception))) - } } - .flatMap(completeMultipartUpload(s3Location, _, s3Headers)) + .flatMap(completeMultipartUpload(_, s3Headers)) } .mapMaterializedValue(_.map(r => MultipartUploadResult(r.location, r.bucket, r.key, r.eTag, r.versionId))) } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSize.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSize.scala index f8eec65f4..459c901e8 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSize.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSize.scala @@ -47,9 +47,9 @@ import scala.annotation.tailrec private def insertMarkers(minChunkSize: Long, maxChunkSize: Int): GraphStage[FlowShape[ByteString, Any]] = new GraphStage[FlowShape[ByteString, Any]] { - val in = Inlet[ByteString]("SplitAfterSize.in") - val out = Outlet[Any]("SplitAfterSize.out") - override val shape = FlowShape.of(in, out) + val in: Inlet[ByteString] = Inlet[ByteString]("SplitAfterSize.in") + val out: Outlet[Any] = Outlet[Any]("SplitAfterSize.out") + override val shape: FlowShape[ByteString, Any] = FlowShape.of(in, out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler with InHandler { @@ -59,9 +59,9 @@ import scala.annotation.tailrec override def onPush(): Unit = { val elem = grab(in) count += elem.size - if (count > maxChunkSize) { + if (count > maxChunkSize) splitElement(elem, elem.size - (count - maxChunkSize)) - } else if (count >= minChunkSize) { + else if (count >= minChunkSize) { count = 0 emitMultiple(out, elem :: NewStream :: Nil) } else emit(out, elem) diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeWithContext.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeWithContext.scala index 
51cd0594c..44efe7260 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeWithContext.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeWithContext.scala @@ -44,9 +44,9 @@ import pekko.util.ByteString private def insertMarkers[C](minChunkSize: Long) = new GraphStage[FlowShape[(ByteString, C), Any]] { - val in = Inlet[(ByteString, C)]("SplitAfterSize.in") - val out = Outlet[Any]("SplitAfterSize.out") - override val shape = FlowShape.of(in, out) + val in: Inlet[(ByteString, C)] = Inlet[(ByteString, C)]("SplitAfterSize.in") + val out: Outlet[Any] = Outlet[Any]("SplitAfterSize.out") + override val shape: FlowShape[(ByteString, C), Any] = FlowShape.of(in, out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler with InHandler { diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala index f8270026f..e9869fa0b 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala @@ -56,17 +56,17 @@ import pekko.http.scaladsl.model.{ HttpHeader, HttpRequest } } // https://tools.ietf.org/html/rfc3986#section-2.3 - def isUnreservedCharacter(c: Char): Boolean = + private def isUnreservedCharacter(c: Char): Boolean = c.isLetterOrDigit || c == '-' || c == '.' || c == '_' || c == '~' // https://tools.ietf.org/html/rfc3986#section-2.2 // Excludes "/" as it is an exception according to spec. val reservedCharacters: String = ":?#[]@!$&'()*+,;=" - def isReservedCharacter(c: Char): Boolean = + private def isReservedCharacter(c: Char): Boolean = reservedCharacters.contains(c) - def canonicalQueryString(query: Query): String = { + private def canonicalQueryString(query: Query): String = { def uriEncode(s: String): String = s.flatMap { case c if isUnreservedCharacter(c) => c.toString case c => "%" + Integer.toHexString(c).toUpperCase @@ -78,7 +78,7 @@ import pekko.http.scaladsl.model.{ HttpHeader, HttpRequest } .mkString("&") } - def canonicalHeaderString(headers: Seq[HttpHeader]): String = + private def canonicalHeaderString(headers: Seq[HttpHeader]): String = headers .groupBy(_.lowercaseName) .map { @@ -92,10 +92,10 @@ import pekko.http.scaladsl.model.{ HttpHeader, HttpRequest } .map { case (name, value) => s"$name:$value" } .mkString("\n") - def signedHeadersString(headers: Seq[HttpHeader]): String = + private def signedHeadersString(headers: Seq[HttpHeader]): String = headers.map(_.lowercaseName).distinct.sorted.mkString(";") - def pathEncode(path: Path): String = + private def pathEncode(path: Path): String = if (path.isEmpty) "/" else { path.toString.flatMap { diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SigningKey.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SigningKey.scala index 6fe970c47..835965e80 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SigningKey.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SigningKey.scala @@ -37,14 +37,14 @@ import software.amazon.awssdk.regions.Region def anonymous: Boolean = credentials.secretAccessKey() == None.orNull && credentials.accessKeyId() == None.orNull - val rawKey = new SecretKeySpec(s"AWS4${credentials.secretAccessKey}".getBytes, 
algorithm) + private val rawKey = new SecretKeySpec(s"AWS4${credentials.secretAccessKey}".getBytes, algorithm) val sessionToken: Option[String] = credentials match { case c: AwsSessionCredentials => Some(c.sessionToken) case _ => None } - def signature(message: Array[Byte]): Array[Byte] = signWithKey(key, message) + private def signature(message: Array[Byte]): Array[Byte] = signWithKey(key, message) def hexEncodedSignature(message: Array[Byte]): String = encodeHex(signature(message)) @@ -53,13 +53,13 @@ import software.amazon.awssdk.regions.Region lazy val key: SecretKeySpec = wrapSignature(dateRegionServiceKey, "aws4_request".getBytes) - lazy val dateRegionServiceKey: SecretKeySpec = + private lazy val dateRegionServiceKey: SecretKeySpec = wrapSignature(dateRegionKey, scope.awsService.getBytes) - lazy val dateRegionKey: SecretKeySpec = + private lazy val dateRegionKey: SecretKeySpec = wrapSignature(dateKey, scope.awsRegion.id.getBytes) - lazy val dateKey: SecretKeySpec = + private lazy val dateKey: SecretKeySpec = wrapSignature(rawKey, scope.formattedDate.getBytes) private def wrapSignature(signature: SecretKeySpec, message: Array[Byte]): SecretKeySpec = diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/model.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/model.scala index 306758cab..30141f11d 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/model.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/model.scala @@ -113,7 +113,7 @@ object MFAStatus { case object Disabled extends MFAStatus /** Java API */ - val disabled = Disabled + val disabled: Disabled.type = Disabled } @@ -125,10 +125,10 @@ object BucketVersioningStatus { case object Suspended extends BucketVersioningStatus /** Java API */ - val enabled = Enabled + val enabled: Enabled.type = Enabled /** Java API */ - val suspended = Suspended + val suspended: Suspended.type = Suspended } final class BucketVersioningResult private (val status: Option[BucketVersioningStatus], @@ -1503,7 +1503,7 @@ final class ObjectMetadata private ( } object ObjectMetadata { - def apply(metadata: Seq[HttpHeader]) = new ObjectMetadata(metadata) + def apply(metadata: Seq[HttpHeader]): ObjectMetadata = new ObjectMetadata(metadata) } /** @@ -1534,11 +1534,11 @@ object BucketAndKey { private val bucketRegexPathStyle = "(/\\.\\.)|(\\.\\./)".r private val bucketRegexDns = "[^a-z0-9\\-\\.]{1,255}|[\\.]{2,}".r - def pathStyleValid(bucket: String) = { + def pathStyleValid(bucket: String): Boolean = { bucketRegexPathStyle.findFirstIn(bucket).isEmpty && ".." 
!= bucket } - def dnsValid(bucket: String) = { + def dnsValid(bucket: String): Boolean = { bucketRegexDns.findFirstIn(bucket).isEmpty } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/settings.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/settings.scala index 5e32a228a..5e7572d99 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/settings.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/settings.scala @@ -403,7 +403,7 @@ final class S3Settings private ( def withS3RegionProvider(value: AwsRegionProvider): S3Settings = copy(s3RegionProvider = value) def withAccessStyle(value: AccessStyle): S3Settings = - if (accessStyle == value) this else copy(accessStyle = value); + if (accessStyle == value) this else copy(accessStyle = value) def withEndpointUrl(value: String): S3Settings = copy(endpointUrl = Option(value)) def withListBucketApiVersion(value: ApiVersion): S3Settings = @@ -545,11 +545,10 @@ object S3Settings { s"'path-style-access' must be 'false', 'true' or 'force'. Got: [$other]. Prefer using access-style instead.") } - val endpointUrl = if (c.hasPath("endpoint-url")) { - Option(c.getString("endpoint-url")) - } else { - None - }.orElse(maybeProxy.map(p => s"${p.scheme}://${p.host}:${p.port}")) + val endpointUrl = (if (c.hasPath("endpoint-url")) + Option(c.getString("endpoint-url")) + else + None).orElse(maybeProxy.map(p => s"${p.scheme}://${p.host}:${p.port}")) if (endpointUrl.isEmpty && accessStyle == PathAccessStyle) log.warn( @@ -567,11 +566,11 @@ object S3Settings { val regionProvider = { val regionProviderPath = "aws.region.provider" - val staticRegionProvider = new AwsRegionProvider { + val staticRegionProvider: AwsRegionProvider = new AwsRegionProvider { lazy val getRegion: Region = Region.of(c.getString("aws.region.default-region")) } - if (c.hasPath(regionProviderPath)) { + if (c.hasPath(regionProviderPath)) c.getString(regionProviderPath) match { case "static" => staticRegionProvider @@ -579,15 +578,14 @@ object S3Settings { case _ => new DefaultAwsRegionProviderChain() } - } else { + else new DefaultAwsRegionProviderChain() - } } val credentialsProvider = { val credProviderPath = "aws.credentials.provider" - if (c.hasPath(credProviderPath)) { + if (c.hasPath(credProviderPath)) c.getString(credProviderPath) match { case "default" => DefaultCredentialsProvider.create() @@ -596,11 +594,10 @@ object S3Settings { val aki = c.getString("aws.credentials.access-key-id") val sak = c.getString("aws.credentials.secret-access-key") val tokenPath = "aws.credentials.token" - val creds: AwsCredentials = if (c.hasPath(tokenPath)) { + val creds: AwsCredentials = if (c.hasPath(tokenPath)) AwsSessionCredentials.create(aki, sak, c.getString(tokenPath)) - } else { + else AwsBasicCredentials.create(aki, sak) - } StaticCredentialsProvider.create(creds) case "anon" => @@ -609,9 +606,8 @@ object S3Settings { case _ => DefaultCredentialsProvider.create() } - } else { + else DefaultCredentialsProvider.create() - } } val apiVersion = Try(c.getInt("list-bucket-api-version") match { diff --git a/s3/src/test/java/docs/javadsl/S3Test.java b/s3/src/test/java/docs/javadsl/S3Test.java index c21ea8be6..ccc710de0 100644 --- a/s3/src/test/java/docs/javadsl/S3Test.java +++ b/s3/src/test/java/docs/javadsl/S3Test.java @@ -41,14 +41,12 @@ import org.junit.Rule; import org.junit.Test; -import java.util.Arrays; import java.util.List; import java.util.Optional; import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; 
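// org.junit.Assert.* supplies the assertEquals and assertArrayEquals assertions used in the tests below.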
-import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.*; public class S3Test extends S3WireMockBase { @Rule public final LogCapturingJunit4 logCapturing = new LogCapturingJunit4(); @@ -269,7 +267,7 @@ public void rangedDownload() throws Exception { byte[] result = resultCompletionStage.toCompletableFuture().get(5, TimeUnit.SECONDS); - assertTrue(Arrays.equals(rangeOfBody(), result)); + assertArrayEquals(rangeOfBody(), result); } @Test @@ -289,7 +287,7 @@ public void rangedDownloadServerSideEncryption() throws Exception { byte[] result = resultCompletionStage.toCompletableFuture().get(5, TimeUnit.SECONDS); - assertTrue(Arrays.equals(rangeOfBodySSE(), result)); + assertArrayEquals(rangeOfBodySSE(), result); } @Test diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/MinioContainer.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/MinioContainer.scala index d301a9781..d79a3c1a7 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/MinioContainer.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/MinioContainer.scala @@ -30,7 +30,7 @@ class MinioContainer(accessKey: String, secretKey: String, domain: String) "MINIO_DOMAIN" -> domain)) { def getHostAddress: String = - s"http://${container.getContainerIpAddress}:${container.getMappedPort(9000)}" + s"http://${container.getHost}:${container.getMappedPort(9000)}" def getVirtualHost: String = s"http://{bucket}.$domain:${container.getMappedPort(9000)}" diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/DiskBufferSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/DiskBufferSpec.scala index 16651bfdb..b26eba2e9 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/DiskBufferSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/DiskBufferSpec.scala @@ -73,8 +73,8 @@ class DiskBufferSpec(_system: ActorSystem) } it should "delete its temp file after N materializations" in { - val tmpDir = Files.createTempDirectory("DiskBufferSpec").toFile() - val before = tmpDir.list().size + val tmpDir = Files.createTempDirectory("DiskBufferSpec").toFile + val before = tmpDir.list().length val chunk = Source(Vector(ByteString(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))) .via(new DiskBuffer(2, 200, Some(tmpDir.toPath))) .runWith(Sink.seq) @@ -84,14 +84,14 @@ class DiskBufferSpec(_system: ActorSystem) chunk shouldBe a[DiskChunk] val source = chunk.asInstanceOf[DiskChunk].data - tmpDir.list().size should be(before + 1) + tmpDir.list().length should be(before + 1) source.runWith(Sink.ignore).futureValue - tmpDir.list().size should be(before + 1) + tmpDir.list().length should be(before + 1) source.runWith(Sink.ignore).futureValue eventually { - tmpDir.list().size should be(before) + tmpDir.list().length should be(before) } } diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeSpec.scala index 2871e6537..938b62025 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeSpec.scala @@ -55,7 +55,7 @@ class SplitAfterSizeSpec(_system: ActorSystem) .via( SplitAfterSize(10, MaxChunkSize)(Flow[ByteString]) .prefixAndTail(10) - .map { case (prefix, tail) => prefix } + .map { case (prefix, _) => prefix } 
.concatSubstreams) .runWith(Sink.seq) .futureValue should be( @@ -69,7 +69,7 @@ class SplitAfterSizeSpec(_system: ActorSystem) .via( SplitAfterSize(10, maxChunkSize = 15)(Flow[ByteString]) .prefixAndTail(10) - .map { case (prefix, tail) => prefix } + .map { case (prefix, _) => prefix } .concatSubstreams) .runWith(Sink.seq) .futureValue should be( @@ -83,7 +83,7 @@ class SplitAfterSizeSpec(_system: ActorSystem) .via( SplitAfterSize(10, maxChunkSize = 15)(Flow[ByteString]) .prefixAndTail(10) - .map { case (prefix, tail) => prefix } + .map { case (prefix, _) => prefix } .concatSubstreams) .runWith(Sink.seq) .futureValue should be( @@ -97,7 +97,7 @@ class SplitAfterSizeSpec(_system: ActorSystem) .via( SplitAfterSize(10, maxChunkSize = 15)(Flow[ByteString]) .prefixAndTail(10) - .map { case (prefix, tail) => prefix } + .map { case (prefix, _) => prefix } .concatSubstreams) .runWith(Sink.seq) .futureValue should be( @@ -112,7 +112,7 @@ class SplitAfterSizeSpec(_system: ActorSystem) .via( SplitAfterSize(10, maxChunkSize = 15)(Flow[ByteString]) .prefixAndTail(10) - .map { case (prefix, tail) => prefix } + .map { case (prefix, _) => prefix } .concatSubstreams) .runWith(Sink.seq) .futureValue should be( diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/authSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/AuthSpec.scala similarity index 94% rename from s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/authSpec.scala rename to s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/AuthSpec.scala index 0086babdd..7016b84f5 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/authSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/AuthSpec.scala @@ -15,7 +15,7 @@ package org.apache.pekko.stream.connectors.s3.impl.auth import org.scalatest.flatspec.AnyFlatSpec -class authSpec extends AnyFlatSpec { +class AuthSpec extends AnyFlatSpec { "encodeHex" should "encode string to hex string" in { assert(encodeHex("1234+abcd".getBytes()) == "313233342b61626364") diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3IntegrationSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3IntegrationSpec.scala index e13410191..0afd74dce 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3IntegrationSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3IntegrationSpec.scala @@ -1295,8 +1295,8 @@ class AWSS3IntegrationSpec extends TestKit(ActorSystem("AWSS3IntegrationSpec")) // Since S3 accounts share global state, we should randomly generate bucket names so concurrent tests // against an S3 account don't conflict with each other override val randomlyGenerateBucketNames: Boolean = - sys.props.get("pekko.stream.connectors.s3.scaladsl.AWSS3IntegrationSpec.randomlyGenerateBucketNames") - .map(_.toBoolean).getOrElse(true) + sys.props.get("pekko.stream.connectors.s3.scaladsl.AWSS3IntegrationSpec.randomlyGenerateBucketNames").forall( + _.toBoolean) } /* @@ -1338,10 +1338,9 @@ object S3IntegrationSpec { val NonExistingBucket = "nowhere" val AWSS3EnableListAllMyBucketsTests = - sys.props.get("pekko.stream.connectors.s3.scaladsl.AWSS3IntegrationSpec.enableListAllMyBucketsTests") - .map(_.toBoolean).getOrElse(true) + sys.props.get("pekko.stream.connectors.s3.scaladsl.AWSS3IntegrationSpec.enableListAllMyBucketsTests").forall( + _.toBoolean) val AWSS3EnableMFATests = - 
sys.props.get("pekko.stream.connectors.s3.scaladsl.AWSS3IntegrationSpec.enableMFATests") - .map(_.toBoolean).getOrElse(true) + sys.props.get("pekko.stream.connectors.s3.scaladsl.AWSS3IntegrationSpec.enableMFATests").forall(_.toBoolean) } diff --git a/scripts/authors.scala b/scripts/authors.scala index 25348c832..e1f707a46 100755 --- a/scripts/authors.scala +++ b/scripts/authors.scala @@ -21,7 +21,7 @@ require(args.length == 2, "usage: authors prevTag currTag") val gitCmd = "git log --no-merges --shortstat -z --minimal -w -C " + args(0) + ".." + args(1) -case class Stats(name: String, email: String, commits: Int = 0, inserts: Int = 0, deletes: Int = 0, filesChanged: Int = 0) +final case class Stats(name: String, email: String, commits: Int = 0, inserts: Int = 0, deletes: Int = 0, filesChanged: Int = 0) val AuthorExp = """Author: (.*) <([^>]+)>""".r val FilesExp = """(\d+)\sfile[s]? changed""".r diff --git a/simple-codecs/src/main/scala/org/apache/pekko/stream/connectors/recordio/impl/RecordIOFramingStage.scala b/simple-codecs/src/main/scala/org/apache/pekko/stream/connectors/recordio/impl/RecordIOFramingStage.scala index 0fa5d59c6..ea033b3e4 100644 --- a/simple-codecs/src/main/scala/org/apache/pekko/stream/connectors/recordio/impl/RecordIOFramingStage.scala +++ b/simple-codecs/src/main/scala/org/apache/pekko/stream/connectors/recordio/impl/RecordIOFramingStage.scala @@ -33,8 +33,8 @@ private[recordio] class RecordIOFramingStage(maxRecordLength: Int) import RecordIOFramingStage._ - val in = Inlet[ByteString]("RecordIOFramingStage.in") - val out = Outlet[ByteString]("RecordIOFramingStage.out") + val in: Inlet[ByteString] = Inlet[ByteString]("RecordIOFramingStage.in") + val out: Outlet[ByteString] = Outlet[ByteString]("RecordIOFramingStage.out") override val shape: FlowShape[ByteString, ByteString] = FlowShape(in, out) override def initialAttributes: Attributes = name("recordIOFraming") @@ -59,16 +59,16 @@ private[recordio] class RecordIOFramingStage(maxRecordLength: Int) override def onPull(): Unit = doParse() override def onUpstreamFinish(): Unit = - if (buffer.isEmpty) { + if (buffer.isEmpty) completeStage() - } else if (isAvailable(out)) { + else if (isAvailable(out)) doParse() - } // else swallow the termination and wait for pull + // else swallow the termination and wait for pull private def tryPull(): Unit = - if (isClosed(in)) { + if (isClosed(in)) failStage(new FramingException("Stream finished but there was a truncated final record in the buffer.")) - } else pull(in) + else pull(in) @tailrec private def doParse(): Unit = diff --git a/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/package.scala b/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/package.scala index 8a9926ad4..e4a7f9483 100644 --- a/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/package.scala +++ b/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/package.scala @@ -37,7 +37,7 @@ sealed abstract class SlickSession { } private[slick] abstract class SlickSessionFactory { - protected final class SlickSessionConfigBackedImpl(val slick: DatabaseConfig[JdbcProfile]) extends SlickSession { + private final class SlickSessionConfigBackedImpl(val slick: DatabaseConfig[JdbcProfile]) extends SlickSession { val db: JdbcBackend#Database = slick.db val profile: JdbcProfile = slick.profile } @@ -66,20 +66,20 @@ object SlickSession extends SlickSessionFactory * in SlickSource to map result set rows back to Java objects. 
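
The `AWSS3IntegrationSpec` flags above replace `.map(_.toBoolean).getOrElse(true)` with `.forall(_.toBoolean)`. The two are equivalent because `forall` on `None` is vacuously true; a quick sanity check (the wrapper object is only for illustration):

object BooleanFlagSketch extends App {
  def enabledOld(prop: Option[String]): Boolean = prop.map(_.toBoolean).getOrElse(true)
  def enabledNew(prop: Option[String]): Boolean = prop.forall(_.toBoolean)

  // An unset property defaults to true either way; explicit values are honoured identically.
  assert(enabledOld(None) && enabledNew(None))
  assert(enabledOld(Some("true")) && enabledNew(Some("true")))
  assert(!enabledOld(Some("false")) && !enabledNew(Some("false")))
}
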
*/ final class SlickRow private[javadsl] (delegate: PositionedResult) { - final def nextBoolean(): java.lang.Boolean = delegate.nextBoolean() - final def nextBigDecimal(): java.math.BigDecimal = delegate.nextBigDecimal().bigDecimal - final def nextBlob(): java.sql.Blob = delegate.nextBlob() - final def nextByte(): java.lang.Byte = delegate.nextByte() - final def nextBytes(): Array[java.lang.Byte] = delegate.nextBytes().map(Byte.box(_)) - final def nextClob(): java.sql.Clob = delegate.nextClob() - final def nextDate(): java.sql.Date = delegate.nextDate() - final def nextDouble(): java.lang.Double = delegate.nextDouble() - final def nextFloat(): java.lang.Float = delegate.nextFloat() - final def nextInt(): java.lang.Integer = delegate.nextInt() - final def nextLong(): java.lang.Long = delegate.nextLong() - final def nextObject(): java.lang.Object = delegate.nextObject() - final def nextShort(): java.lang.Short = delegate.nextShort() - final def nextString(): java.lang.String = delegate.nextString() - final def nextTime(): java.sql.Time = delegate.nextTime() - final def nextTimestamp(): java.sql.Timestamp = delegate.nextTimestamp() + def nextBoolean(): java.lang.Boolean = delegate.nextBoolean() + def nextBigDecimal(): java.math.BigDecimal = delegate.nextBigDecimal().bigDecimal + def nextBlob(): java.sql.Blob = delegate.nextBlob() + def nextByte(): java.lang.Byte = delegate.nextByte() + def nextBytes(): Array[java.lang.Byte] = delegate.nextBytes().map(Byte.box) + def nextClob(): java.sql.Clob = delegate.nextClob() + def nextDate(): java.sql.Date = delegate.nextDate() + def nextDouble(): java.lang.Double = delegate.nextDouble() + def nextFloat(): java.lang.Float = delegate.nextFloat() + def nextInt(): java.lang.Integer = delegate.nextInt() + def nextLong(): java.lang.Long = delegate.nextLong() + def nextObject(): java.lang.Object = delegate.nextObject() + def nextShort(): java.lang.Short = delegate.nextShort() + def nextString(): java.lang.String = delegate.nextString() + def nextTime(): java.sql.Time = delegate.nextTime() + def nextTimestamp(): java.sql.Timestamp = delegate.nextTimestamp() } diff --git a/slick/src/test/java/docs/javadsl/DocSnippetFlowWithPassThrough.java b/slick/src/test/java/docs/javadsl/DocSnippetFlowWithPassThrough.java index 131d50ee6..1d8831e94 100644 --- a/slick/src/test/java/docs/javadsl/DocSnippetFlowWithPassThrough.java +++ b/slick/src/test/java/docs/javadsl/DocSnippetFlowWithPassThrough.java @@ -111,8 +111,6 @@ public static void main(String[] args) throws Exception { // #flowWithPassThrough-example done.whenComplete( - (value, exception) -> { - system.terminate(); - }); + (value, exception) -> system.terminate()); } } diff --git a/slick/src/test/java/docs/javadsl/DocSnippetSink.java b/slick/src/test/java/docs/javadsl/DocSnippetSink.java index aa139bbc5..057b9aafd 100644 --- a/slick/src/test/java/docs/javadsl/DocSnippetSink.java +++ b/slick/src/test/java/docs/javadsl/DocSnippetSink.java @@ -57,8 +57,6 @@ public static void main(String[] args) throws Exception { // #sink-example done.whenComplete( - (value, exception) -> { - system.terminate(); - }); + (value, exception) -> system.terminate()); } } diff --git a/slick/src/test/java/docs/javadsl/DocSnippetSource.java b/slick/src/test/java/docs/javadsl/DocSnippetSource.java index 28f45d631..cb5aa8b77 100644 --- a/slick/src/test/java/docs/javadsl/DocSnippetSource.java +++ b/slick/src/test/java/docs/javadsl/DocSnippetSource.java @@ -41,8 +41,6 @@ public static void main(String[] args) throws Exception { // #source-example 
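
`delegate.nextBytes().map(Byte.box)` above passes the method itself as a function value instead of wrapping it in placeholder syntax; the Java-side changes elsewhere in this patch do the analogous thing with method references (`http::singleRequest`, `awsSnsClient::close`). The two Scala forms are interchangeable, e.g. (illustrative wrapper only):

object MethodValueSketch extends App {
  val primitives: Array[Byte] = Array[Byte](1, 2, 3)

  // Placeholder syntax and a plain method value produce the same function.
  val viaPlaceholder: Array[java.lang.Byte] = primitives.map(Byte.box(_))
  val viaMethodValue: Array[java.lang.Byte] = primitives.map(Byte.box)

  assert(viaPlaceholder.sameElements(viaMethodValue))
}
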
done.whenComplete( - (value, exception) -> { - system.terminate(); - }); + (value, exception) -> system.terminate()); } } diff --git a/slick/src/test/java/docs/javadsl/User.java b/slick/src/test/java/docs/javadsl/User.java index 40db1c89a..e7df138a2 100644 --- a/slick/src/test/java/docs/javadsl/User.java +++ b/slick/src/test/java/docs/javadsl/User.java @@ -13,6 +13,8 @@ package docs.javadsl; +import java.util.Objects; + public class User { public final Integer id; public final String name; @@ -39,12 +41,9 @@ public boolean equals(Object obj) { return false; } final User other = (User) obj; - if ((this.name == null) ? (other.name != null) : !this.name.equals(other.name)) { - return false; - } - if (this.id != other.id) { + if (!Objects.equals(this.name, other.name)) { return false; } - return true; + return Objects.equals(this.id, other.id); } } diff --git a/slick/src/test/scala/docs/scaladsl/DocSnippets.scala b/slick/src/test/scala/docs/scaladsl/DocSnippets.scala index 1e4bc7924..34d4fa824 100644 --- a/slick/src/test/scala/docs/scaladsl/DocSnippets.scala +++ b/slick/src/test/scala/docs/scaladsl/DocSnippets.scala @@ -36,7 +36,7 @@ object SlickSourceWithPlainSQLQueryExample extends App { system.registerOnTermination(session.close()) // The example domain - case class User(id: Int, name: String) + final case class User(id: Int, name: String) // We need this to automatically transform result rows // into instances of the User class. @@ -57,10 +57,7 @@ object SlickSourceWithPlainSQLQueryExample extends App { .runWith(Sink.ignore) // #source-example - done.onComplete { - case _ => - system.terminate() - } + done.onComplete(_ => system.terminate()) } object SlickSourceWithTypedQueryExample extends App { @@ -89,10 +86,7 @@ object SlickSourceWithTypedQueryExample extends App { .runWith(Sink.ignore) // #source-with-typed-query - done.onComplete { - case _ => - system.terminate() - } + done.onComplete(_ => system.terminate()) } object SlickSinkExample extends App { @@ -104,7 +98,7 @@ object SlickSinkExample extends App { system.registerOnTermination(session.close()) // The example domain - case class User(id: Int, name: String) + final case class User(id: Int, name: String) val users = (1 to 42).map(i => User(i, s"Name$i")) // This import enables the use of the Slick sql"...", @@ -121,10 +115,7 @@ object SlickSinkExample extends App { sqlu"INSERT INTO PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})")) // #sink-example - done.onComplete { - case _ => - system.terminate() - } + done.onComplete(_ => system.terminate()) } object SlickFlowExample extends App { @@ -136,7 +127,7 @@ object SlickFlowExample extends App { system.registerOnTermination(session.close()) // The example domain - case class User(id: Int, name: String) + final case class User(id: Int, name: String) val users = (1 to 42).map(i => User(i, s"Name$i")) // This import enables the use of the Slick sql"...", @@ -164,10 +155,10 @@ object SlickFlowExample extends App { object SlickFlowWithPassThroughExample extends App { // mimics a Kafka 'Committable' type - case class CommittableOffset(offset: Int) { + final case class CommittableOffset(offset: Int) { def commit: Future[Done] = Future.successful(Done) } - case class KafkaMessage[A](msg: A, offset: CommittableOffset) { + final case class KafkaMessage[A](msg: A, offset: CommittableOffset) { // map the msg and keep the offset def map[B](f: A => B): KafkaMessage[B] = KafkaMessage(f(msg), offset) } @@ -180,7 +171,7 @@ object SlickFlowWithPassThroughExample extends App 
{ system.registerOnTermination(session.close()) // The example domain - case class User(id: Int, name: String) + final case class User(id: Int, name: String) val users = (1 to 42).map(i => User(i, s"Name$i")) val messagesFromKafka = users.zipWithIndex.map { case (user, index) => KafkaMessage(user, CommittableOffset(index)) } @@ -210,8 +201,5 @@ object SlickFlowWithPassThroughExample extends App { .runWith(Sink.ignore) // #flowWithPassThrough-example - done.onComplete { - case _ => - system.terminate() - } + done.onComplete(_ => system.terminate()) } diff --git a/slick/src/test/scala/docs/scaladsl/SlickSpec.scala b/slick/src/test/scala/docs/scaladsl/SlickSpec.scala index e448d3a7f..5e4f91f16 100644 --- a/slick/src/test/scala/docs/scaladsl/SlickSpec.scala +++ b/slick/src/test/scala/docs/scaladsl/SlickSpec.scala @@ -74,7 +74,7 @@ class SlickSpec sqlu"INSERT INTO PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})" def getAllUsersFromDb: Future[Set[User]] = Slick.source(selectAllUsers).runWith(Sink.seq).map(_.toSet) - def populate() = { + def populate(): Unit = { val actions = users.map(insertUser) // This uses the standard Slick API exposed by the Slick session @@ -172,7 +172,7 @@ class SlickSpec .runWith(Sink.seq) .futureValue - inserted must have size (users.size) + inserted must have size users.size inserted.toSet mustBe Set(1) getAllUsersFromDb.futureValue mustBe users @@ -184,7 +184,7 @@ class SlickSpec .runWith(Sink.seq) .futureValue - inserted must have size (users.size) + inserted must have size users.size inserted.toSet mustBe Set(1) getAllUsersFromDb.futureValue mustBe users @@ -194,7 +194,7 @@ class SlickSpec val inserted = Source(users) .grouped(10) .via( - Slick.flow(parallelism = 4, (group: Seq[User]) => group.map(insertUser(_)).reduceLeft(_.andThen(_)))) + Slick.flow(parallelism = 4, (group: Seq[User]) => group.map(insertUser).reduceLeft(_.andThen(_)))) .runWith(Sink.seq) .futureValue @@ -215,7 +215,7 @@ class SlickSpec .runWith(Sink.seq) .futureValue - inserted must have size (users.size) + inserted must have size users.size inserted.map(_._1).toSet mustBe users inserted.map(_._2).toSet mustBe Set(1) @@ -231,7 +231,7 @@ class SlickSpec .runWith(Sink.seq) .futureValue - inserted must have size (users.size) + inserted must have size users.size inserted.map(_._1).toSet mustBe users inserted.map(_._2).toSet mustBe Set(1) @@ -251,7 +251,7 @@ class SlickSpec .runWith(Sink.fold(Seq.empty[(User, Int)])((a, b) => a ++ b)) .futureValue - inserted must have size (users.size) + inserted must have size users.size inserted.map(_._1).toSet mustBe users inserted.map(_._2).toSet mustBe Set(1) @@ -265,8 +265,8 @@ class SlickSpec // After we've written them to a db with Slick, we want // to commit the offset to Kafka - case class KafkaOffset(offset: Int) - case class KafkaMessage[A](msg: A, offset: KafkaOffset) { + final case class KafkaOffset(offset: Int) + final case class KafkaMessage[A](msg: A, offset: KafkaOffset) { // map the msg and keep the offset def map[B](f: A => B): KafkaMessage[B] = KafkaMessage(f(msg), offset) } @@ -295,7 +295,7 @@ class SlickSpec Await.ready(f1, Duration.Inf) // Make sure all messages was committed to kafka - committedOffsets.map(_.offset).sorted mustBe ((0 until (users.size)).toList) + committedOffsets.map(_.offset).sorted mustBe (0 until users.size).toList // Assert that all docs were written to db getAllUsersFromDb.futureValue mustBe users @@ -323,7 +323,7 @@ class SlickSpec Source(users) .grouped(10) .runWith( - Slick.sink(parallelism = 
4, (group: Seq[User]) => group.map(insertUser(_)).reduceLeft(_.andThen(_)))) + Slick.sink(parallelism = 4, (group: Seq[User]) => group.map(insertUser).reduceLeft(_.andThen(_)))) .futureValue getAllUsersFromDb.futureValue mustBe users diff --git a/sns/src/test/java/docs/javadsl/SnsPublisherTest.java b/sns/src/test/java/docs/javadsl/SnsPublisherTest.java index b05d9fe46..7d25ce54b 100644 --- a/sns/src/test/java/docs/javadsl/SnsPublisherTest.java +++ b/sns/src/test/java/docs/javadsl/SnsPublisherTest.java @@ -94,7 +94,7 @@ static SnsAsyncClient createSnsClient() { // .overrideConfiguration(...) .build(); - system.registerOnTermination(() -> awsSnsClient.close()); + system.registerOnTermination(awsSnsClient::close); // #init-client return awsSnsClient; diff --git a/sns/src/test/scala/org/apache/pekko/stream/connectors/sns/SnsPublishMockingSpec.scala b/sns/src/test/scala/org/apache/pekko/stream/connectors/sns/SnsPublishMockingSpec.scala index b4ffcb319..44fa6751a 100644 --- a/sns/src/test/scala/org/apache/pekko/stream/connectors/sns/SnsPublishMockingSpec.scala +++ b/sns/src/test/scala/org/apache/pekko/stream/connectors/sns/SnsPublishMockingSpec.scala @@ -151,7 +151,7 @@ class SnsPublishMockingSpec extends AnyFlatSpec with DefaultTestContext with Mat } it should "fail stage if upstream failure occurs" in { - case class MyCustomException(message: String) extends Exception(message) + final case class MyCustomException(message: String) extends Exception(message) val (probe, future) = TestSource.probe[String].via(SnsPublisher.flow("topic-arn")).toMat(Sink.seq)(Keep.both).run() probe.sendError(MyCustomException("upstream failure")) diff --git a/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/SolrMessages.scala b/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/SolrMessages.scala index d4b2caa25..9ea92a017 100644 --- a/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/SolrMessages.scala +++ b/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/SolrMessages.scala @@ -105,7 +105,7 @@ final class WriteMessage[T, C] private ( updates = updates, passThrough = passThrough) - override def toString = + override def toString: String = "WriteMessage(" + s"operation=$operation," + s"idField=$idField," + diff --git a/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/SolrUpdateSettings.scala b/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/SolrUpdateSettings.scala index 88da9df4c..d7c5415ff 100644 --- a/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/SolrUpdateSettings.scala +++ b/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/SolrUpdateSettings.scala @@ -25,7 +25,7 @@ final class SolrUpdateSettings private ( commitWithin: Int): SolrUpdateSettings = new SolrUpdateSettings( commitWithin = commitWithin) - override def toString = + override def toString: String = "SolrUpdateSettings(" + s"commitWithin=$commitWithin" + ")" @@ -33,7 +33,7 @@ final class SolrUpdateSettings private ( object SolrUpdateSettings { - val Defaults = new SolrUpdateSettings(-1) + val Defaults: SolrUpdateSettings = new SolrUpdateSettings(-1) /** Scala API */ def apply(): SolrUpdateSettings = Defaults diff --git a/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/impl/SolrFlowStage.scala b/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/impl/SolrFlowStage.scala index 6a1e785b2..96d63d81b 100644 --- a/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/impl/SolrFlowStage.scala +++ 
b/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/impl/SolrFlowStage.scala @@ -46,7 +46,8 @@ private[solr] final class SolrFlowStage[T, C]( private val in = Inlet[immutable.Seq[WriteMessage[T, C]]]("messages") private val out = Outlet[immutable.Seq[WriteResult[T, C]]]("result") - override val shape = FlowShape(in, out) + override val shape: FlowShape[immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]]] = + FlowShape(in, out) override protected def initialAttributes: Attributes = super.initialAttributes and Attributes(ActorAttributes.IODispatcher) @@ -90,9 +91,8 @@ private final class SolrFlowLogic[T, C]( } private def tryPull(): Unit = - if (!isClosed(in) && !hasBeenPulled(in)) { + if (!isClosed(in) && !hasBeenPulled(in)) pull(in) - } private def updateBulkToSolr(messages: immutable.Seq[WriteMessage[T, C]]): UpdateResponse = { val docs = messages.flatMap(_.source.map(messageBinder)) @@ -113,12 +113,11 @@ private final class SolrFlowLogic[T, C]( message.routingFieldValue.foreach { routingFieldValue => val routingField = client match { - case csc: CloudSolrClient => { + case csc: CloudSolrClient => val docCollection = Option(csc.getZkStateReader.getCollection(collection)) docCollection.flatMap { dc => Option(dc.getRouter.getRouteField(dc)) } - } case _ => None } routingField.foreach { routingField => @@ -130,10 +129,9 @@ private final class SolrFlowLogic[T, C]( } message.updates.foreach { - case (field, updates) => { - val jMap = updates.asInstanceOf[Map[String, Any]].asJava + case (field, updates) => + val jMap = updates.asJava doc.addField(field, jMap) - } } doc } @@ -183,11 +181,10 @@ private final class SolrFlowLogic[T, C]( case DeleteByQuery => Option(deleteEachByQuery(current)) case PassThrough => None } - if (remaining.nonEmpty) { + if (remaining.nonEmpty) send(remaining) - } else { + else response - } } val response = if (messages.nonEmpty) send(messages).fold(0) { _.getStatus } diff --git a/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/impl/SolrSourceStage.scala b/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/impl/SolrSourceStage.scala index fcadf7a22..7521267f0 100644 --- a/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/impl/SolrSourceStage.scala +++ b/solr/src/main/scala/org/apache/pekko/stream/connectors/solr/impl/SolrSourceStage.scala @@ -55,13 +55,12 @@ private[solr] final class SolrSourceStage(tupleStream: TupleStream) extends Grap private def fetchFromSolr(): Unit = { val tuple = tupleStream.read() - if (tuple.EOF) { + if (tuple.EOF) completeStage() - } else if (tuple.EXCEPTION) { + else if (tuple.EXCEPTION) failStage(new IllegalStateException(tuple.getException)) - } else { + else emit(out, tuple) - } } } diff --git a/solr/src/test/scala/docs/scaladsl/SolrSpec.scala b/solr/src/test/scala/docs/scaladsl/SolrSpec.scala index e1a24c2f9..54cec2e53 100644 --- a/solr/src/test/scala/docs/scaladsl/SolrSpec.scala +++ b/solr/src/test/scala/docs/scaladsl/SolrSpec.scala @@ -132,7 +132,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca import org.apache.solr.client.solrj.beans.Field import scala.annotation.meta.field - case class BookBean(@(Field @field) title: String) + final case class BookBean(@(Field @field) title: String) // #define-bean // #run-bean @@ -270,21 +270,16 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca var committedOffsets = List[CommittableOffset]() - case class CommittableOffset(offset: Int) { - def commitScaladsl(): Future[Done] 
= { - committedOffsets = committedOffsets :+ this - Future.successful(Done) - } - } + final case class CommittableOffset(offset: Int) - case class CommittableOffsetBatch(offsets: immutable.Seq[CommittableOffset]) { + final case class CommittableOffsetBatch(offsets: immutable.Seq[CommittableOffset]) { def commitScaladsl(): Future[Done] = { committedOffsets = committedOffsets ++ offsets Future.successful(Done) } } - case class CommittableMessage(book: Book, committableOffset: CommittableOffset) + final case class CommittableMessage(book: Book, committableOffset: CommittableOffset) val messagesFromKafka = List( CommittableMessage(Book("Book 1"), CommittableOffset(0)), @@ -633,22 +628,15 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca var committedOffsets = List[CommittableOffset]() - case class CommittableOffset(offset: Int) { - def commitScaladsl(): Future[Done] = { - committedOffsets = committedOffsets :+ this - Future.successful(Done) - } - } + final case class CommittableOffset(offset: Int) - case class CommittableOffsetBatch(offsets: immutable.Seq[CommittableOffset]) { + final case class CommittableOffsetBatch(offsets: immutable.Seq[CommittableOffset]) { def commitScaladsl(): Future[Done] = { committedOffsets = committedOffsets ++ offsets Future.successful(Done) } } - case class CommittableMessage(book: Book, committableOffset: CommittableOffset) - val messagesFromKafka = List( CommittableOffset(0), CommittableOffset(1), diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsModel.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsModel.scala index 2ced4a1f2..c50b9091f 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsModel.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsModel.scala @@ -45,7 +45,7 @@ object MessageAction { override def hashCode(): Int = java.util.Objects.hash(message) } - final object Delete { + object Delete { def apply(message: Message): Delete = new Delete(message) } @@ -63,7 +63,7 @@ object MessageAction { override def hashCode(): Int = java.util.Objects.hash(message) } - final object Ignore { + object Ignore { def apply(message: Message): Ignore = new Ignore(message) } diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsPublishGroupedSettings.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsPublishGroupedSettings.scala index daa2dd717..dcef523e6 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsPublishGroupedSettings.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsPublishGroupedSettings.scala @@ -43,7 +43,7 @@ final class SqsPublishGroupedSettings private (val maxBatchSize: Int, maxBatchWait = maxBatchWait, concurrentRequests = concurrentRequests) - override def toString = + override def toString: String = "SqsPublishGroupedSettings(" + s"maxBatchSize=$maxBatchSize," + s"maxBatchWait=$maxBatchWait," + diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsSourceSettings.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsSourceSettings.scala index 79f464a43..cbef218f6 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsSourceSettings.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/SqsSourceSettings.scala @@ -237,12 +237,12 @@ case object SequenceNumber extends MessageSystemAttributeName(model.MessageSyste * 
https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html#API_ReceiveMessage_RequestParameters */ object MessageSystemAttributeName { - val all = All - val approximateFirstReceiveTimestamp = ApproximateFirstReceiveTimestamp - val approximateReceiveCount = ApproximateReceiveCount - val senderId = SenderId - val sentTimestamp = SentTimestamp - val messageDeduplicationId = MessageDeduplicationId - val messageGroupId = MessageGroupId - val sequenceNumber = SequenceNumber + val all: All.type = All + val approximateFirstReceiveTimestamp: ApproximateFirstReceiveTimestamp.type = ApproximateFirstReceiveTimestamp + val approximateReceiveCount: ApproximateReceiveCount.type = ApproximateReceiveCount + val senderId: SenderId.type = SenderId + val sentTimestamp: SentTimestamp.type = SentTimestamp + val messageDeduplicationId: MessageDeduplicationId.type = MessageDeduplicationId + val messageGroupId: MessageGroupId.type = MessageGroupId + val sequenceNumber: SequenceNumber.type = SequenceNumber } diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/impl/BalancingMapAsync.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/impl/BalancingMapAsync.scala index 2416f8019..66a7d4b20 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/impl/BalancingMapAsync.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/impl/BalancingMapAsync.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.sqs.impl import org.apache.pekko +import org.apache.pekko.stream.Supervision.Decider import pekko.annotation.InternalApi import pekko.stream.ActorAttributes.SupervisionStrategy import pekko.stream.Attributes.name @@ -31,8 +32,7 @@ import scala.util.{ Failure, Success } * Internal API. 
*/ @InternalApi private[impl] object BufferImpl { - val FixedQueueSize = 128 - val FixedQueueMask = 127 + private val FixedQueueSize = 128 def apply[T](size: Int, effectiveAttributes: Attributes): Buffer[T] = apply(size, effectiveAttributes.mandatoryAttribute[ActorAttributes.MaxFixedBufferSize].size) @@ -52,16 +52,16 @@ import scala.util.{ Failure, Success } private val in = Inlet[In]("BalancingMapAsync.in") private val out = Outlet[Out]("BalancingMapAsync.out") - override def initialAttributes = name("BalancingMapAsync") + override def initialAttributes: Attributes = name("BalancingMapAsync") - override val shape = FlowShape(in, out) + override val shape: FlowShape[In, Out] = FlowShape(in, out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider + lazy val decider: Decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider var buffer: Buffer[Holder[Out]] = _ - var parallelism = maxParallelism + var parallelism: Int = maxParallelism private val futureCB = getAsyncCallback[Holder[Out]](holder => holder.elem match { diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSource.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSource.scala index 8e3054141..5b2271fcb 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSource.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSource.scala @@ -61,12 +61,11 @@ object SqsSource { } private def resolveHandler(parallelism: Int)(implicit sqsClient: SqsAsyncClient) = - if (parallelism == 1) { + if (parallelism == 1) Flow[ReceiveMessageRequest].mapAsyncUnordered(parallelism)(sqsClient.receiveMessage(_).asScala) - } else { + else BalancingMapAsync[ReceiveMessageRequest, ReceiveMessageResponse]( parallelism, sqsClient.receiveMessage(_).asScala, (response, _) => if (response.messages().isEmpty) 1 else parallelism) - } } diff --git a/sqs/src/test/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSourceMockSpec.scala b/sqs/src/test/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSourceMockSpec.scala index d72c060fc..53d29247e 100644 --- a/sqs/src/test/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSourceMockSpec.scala +++ b/sqs/src/test/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSourceMockSpec.scala @@ -115,7 +115,7 @@ class SqsSourceMockSpec extends AnyFlatSpec with Matchers with DefaultTestContex def answer(invocation: InvocationOnMock) = { requestsCounter += 1 - if (requestsCounter > firstWithDataCount && requestsCounter <= firstWithDataCount + thenEmptyCount) { + if (requestsCounter > firstWithDataCount && requestsCounter <= firstWithDataCount + thenEmptyCount) pekko.pattern .after(timeout, system.scheduler) { Future.successful( @@ -126,13 +126,12 @@ class SqsSourceMockSpec extends AnyFlatSpec with Matchers with DefaultTestContex }(system.dispatcher) .asJava .toCompletableFuture - } else { + else CompletableFuture.completedFuture( ReceiveMessageResponse .builder() .messages(defaultMessages: _*) .build()) - } } }) diff --git a/sse/src/test/java/docs/javadsl/EventSourceTest.java b/sse/src/test/java/docs/javadsl/EventSourceTest.java index d0dcd0813..d1f1c209f 100644 --- a/sse/src/test/java/docs/javadsl/EventSourceTest.java +++ b/sse/src/test/java/docs/javadsl/EventSourceTest.java @@ -47,8 +47,7 @@ public static 
void compileTest() { // #event-source final Http http = Http.get(system); - Function> send = - (request) -> http.singleRequest(request); + Function> send = http::singleRequest; final Uri targetUri = Uri.create(String.format("http://%s:%d", host, port)); final Optional lastEventId = Optional.of("2"); diff --git a/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala b/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala index 2502ff912..c1a997666 100644 --- a/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala +++ b/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala @@ -46,10 +46,10 @@ import org.scalatest.wordspec.AsyncWordSpec object EventSourceSpec { - final object Server { + object Server { - private final case object Bind - private final case object Unbind + private case object Bind + private case object Unbind private def route(size: Int, setEventId: Boolean): Route = { import Directives._ @@ -202,7 +202,7 @@ final class EventSourceSpec extends AsyncWordSpec with Matchers with BeforeAndAf } } - override protected def afterAll() = { + override protected def afterAll(): Unit = { Await.ready(system.terminate(), 42.seconds) super.afterAll() } diff --git a/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/CapturingAppender.scala b/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/CapturingAppender.scala index c922c87d0..0444c00bf 100644 --- a/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/CapturingAppender.scala +++ b/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/CapturingAppender.scala @@ -56,8 +56,8 @@ import org.slf4j.LoggerFactory * the captured logging events are flushed to the appenders defined for the * org.apache.pekko.actor.testkit.typed.internal.CapturingAppenderDelegate logger. * - * The flushing on test failure is handled by [[pekko.actor.testkit.typed.scaladsl.LogCapturing]] - * for ScalaTest and [[pekko.actor.testkit.typed.javadsl.LogCapturing]] for JUnit. + * The flushing on test failure is handled by [[org.apache.pekko.stream.connectors.testkit.scaladsl.LogCapturing]] + * for ScalaTest and [[org.apache.pekko.stream.connectors.testkit.scaladsl.LogCapturing]] for JUnit. 
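
A recurring pair of inspections in this patch: `final` is added to case classes (`Stats`, `MyCustomException`, the send/receive context classes further down) because an open case class invites subclasses that can break the generated `equals`/`hashCode` contract, while the redundant `final` is dropped from case objects such as `Bind`/`Unbind` above, which are singletons already. In miniature (illustrative declarations, not connector code):

final case class Stats(name: String, commits: Int = 0) // cannot be subclassed
case object Bind                                        // already a singleton; `final` adds nothing
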
* * Use configuration like the following the logback-test.xml: * @@ -105,9 +105,8 @@ import org.slf4j.LoggerFactory for (event <- buffer; appender <- appenders) { if (sourceActorSystem.isEmpty || event.getMDCPropertyMap.get("sourceActorSystem") == null - || sourceActorSystem.contains(event.getMDCPropertyMap.get("sourceActorSystem"))) { + || sourceActorSystem.contains(event.getMDCPropertyMap.get("sourceActorSystem"))) appender.doAppend(event) - } } clear() } diff --git a/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/LogbackUtil.scala b/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/LogbackUtil.scala index 5aa368fb6..9f3bd64ce 100644 --- a/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/LogbackUtil.scala +++ b/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/LogbackUtil.scala @@ -34,8 +34,8 @@ import scala.annotation.tailrec @tailrec private def getLogbackLoggerInternal(loggerName: String, count: Int): ch.qos.logback.classic.Logger = { LoggerFactory.getLogger(loggerNameOrRoot(loggerName)) match { - case logger: ch.qos.logback.classic.Logger => logger - case logger: org.slf4j.helpers.SubstituteLogger if count > 0 => + case logger: ch.qos.logback.classic.Logger => logger + case _: org.slf4j.helpers.SubstituteLogger if count > 0 => // Wait for logging initialisation http://www.slf4j.org/codes.html#substituteLogger Thread.sleep(50) getLogbackLoggerInternal(loggerName, count - 1) diff --git a/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/javadsl/LogCapturingJunit4.scala b/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/javadsl/LogCapturingJunit4.scala index 9b7c6a514..50181aa52 100644 --- a/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/javadsl/LogCapturingJunit4.scala +++ b/testkit/src/main/scala/org/apache/pekko/stream/connectors/testkit/javadsl/LogCapturingJunit4.scala @@ -52,8 +52,8 @@ final class LogCapturingJunit4 extends TestRule { private val myLogger = LoggerFactory.getLogger(classOf[LogCapturingJunit4]) override def apply(base: Statement, description: Description): Statement = { - new Statement { - override def evaluate(): Unit = { + () => + { try { myLogger.info(s"Logging started for test [${description.getClassName}: ${description.getMethodName}]") base.evaluate() @@ -73,6 +73,5 @@ final class LogCapturingJunit4 extends TestRule { capturingAppender.clear() } } - } } } diff --git a/text/src/main/scala/org/apache/pekko/stream/connectors/text/impl/CharsetLogic.scala b/text/src/main/scala/org/apache/pekko/stream/connectors/text/impl/CharsetLogic.scala index f8a56d439..33f53e81b 100644 --- a/text/src/main/scala/org/apache/pekko/stream/connectors/text/impl/CharsetLogic.scala +++ b/text/src/main/scala/org/apache/pekko/stream/connectors/text/impl/CharsetLogic.scala @@ -37,12 +37,11 @@ private[impl] trait Decoding { protected def decode(bytes: ByteBuffer): Unit = { val chars = CharBuffer.allocate(bytes.limit()) val result = decoder.decode(bytes, chars, false) - if (result.isOverflow) { + if (result.isOverflow) failStage(new IllegalArgumentException(s"Incoming bytes decoded into more characters: $result")) - } else { - if (result.isError) { + else { + if (result.isError) result.throwException() - } val count = chars.position() chars.rewind() decoded(chars, count, bytes) @@ -81,12 +80,11 @@ private[impl] trait Encoding { protected def encode(chars: CharBuffer): Unit = { val bytes = ByteBuffer.allocate((chars.limit() * encoder.maxBytesPerChar().toDouble).toInt) val 
result = encoder.encode(chars, bytes, false) - if (result.isOverflow) { + if (result.isOverflow) failStage(new IllegalArgumentException(s"Incoming chars decoded into more than expected characters: $result")) - } else { - if (result.isError) { + else { + if (result.isError) result.throwException() - } val count = bytes.position() bytes.rewind() bytes.limit(count) diff --git a/text/src/test/java/docs/javadsl/CharsetCodingFlowsDoc.java b/text/src/test/java/docs/javadsl/CharsetCodingFlowsDoc.java index 5dd3d9d6e..8367b994a 100644 --- a/text/src/test/java/docs/javadsl/CharsetCodingFlowsDoc.java +++ b/text/src/test/java/docs/javadsl/CharsetCodingFlowsDoc.java @@ -28,7 +28,6 @@ // #encoding import org.junit.AfterClass; -import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -41,8 +40,6 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; diff --git a/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/impl/UdpBind.scala b/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/impl/UdpBind.scala index f285662b2..8dbee57a8 100644 --- a/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/impl/UdpBind.scala +++ b/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/impl/UdpBind.scala @@ -42,7 +42,7 @@ import scala.concurrent.{ Future, Promise } private var listener: ActorRef = _ override def preStart(): Unit = { - implicit val sender = getStageActor(processIncoming).ref + implicit val sender: ActorRef = getStageActor(processIncoming).ref IO(Udp) ! Udp.Bind(sender, localAddress, options) } @@ -59,21 +59,19 @@ import scala.concurrent.{ Future, Promise } boundPromise.failure(ex) failStage(ex) case (_, Udp.Received(data, sender)) => - if (isAvailable(out)) { + if (isAvailable(out)) push(out, Datagram(data, sender)) - } case _ => } - private def unbindListener() = - if (listener != null) { + private def unbindListener(): Unit = + if (listener != null) listener ! Udp.Unbind - } setHandler( in, new InHandler { - override def onPush() = { + override def onPush(): Unit = { val msg = grab(in) listener ! Udp.Send(msg.data, msg.remote) pull(in) @@ -96,7 +94,8 @@ import scala.concurrent.{ Future, Promise } val out: Outlet[Datagram] = Outlet("UdpBindFlow.in") val shape: FlowShape[Datagram, Datagram] = FlowShape.of(in, out) - override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (UdpBindLogic, Future[InetSocketAddress]) = { val boundPromise = Promise[InetSocketAddress]() (new UdpBindLogic(localAddress, options, boundPromise)(shape), boundPromise.future) } diff --git a/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/impl/UdpSend.scala b/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/impl/UdpSend.scala index 038be1d4f..5912476c6 100644 --- a/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/impl/UdpSend.scala +++ b/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/impl/UdpSend.scala @@ -55,15 +55,14 @@ import scala.collection.immutable.Iterable case _ => } - private def stopSimpleSender() = - if (simpleSender != null) { + private def stopSimpleSender(): Unit = + if (simpleSender != null) simpleSender ! 
PoisonPill - } setHandler( in, new InHandler { - override def onPush() = { + override def onPush(): Unit = { val msg = grab(in) simpleSender ! Udp.Send(msg.data, msg.remote) push(out, msg) diff --git a/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/model.scala b/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/model.scala index 3fc013670..bdf4aa745 100644 --- a/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/model.scala +++ b/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/model.scala @@ -19,9 +19,9 @@ import org.apache.pekko.util.ByteString final class Datagram private (val data: ByteString, val remote: InetSocketAddress) { - def withData(data: ByteString) = copy(data = data) + def withData(data: ByteString): Datagram = copy(data = data) - def withRemote(remote: InetSocketAddress) = copy(remote = remote) + def withRemote(remote: InetSocketAddress): Datagram = copy(remote = remote) /** * Java API @@ -44,10 +44,10 @@ final class Datagram private (val data: ByteString, val remote: InetSocketAddres } object Datagram { - def apply(data: ByteString, remote: InetSocketAddress) = new Datagram(data, remote) + def apply(data: ByteString, remote: InetSocketAddress): Datagram = new Datagram(data, remote) /** * Java API */ - def create(data: ByteString, remote: InetSocketAddress) = Datagram(data, remote) + def create(data: ByteString, remote: InetSocketAddress): Datagram = Datagram(data, remote) } diff --git a/udp/src/test/java/docs/javadsl/UdpTest.java b/udp/src/test/java/docs/javadsl/UdpTest.java index 346b9d175..79ed84436 100644 --- a/udp/src/test/java/docs/javadsl/UdpTest.java +++ b/udp/src/test/java/docs/javadsl/UdpTest.java @@ -118,7 +118,7 @@ List listAllBroadcastAddresses() throws SocketException { } networkInterface.getInterfaceAddresses().stream() - .map(a -> a.getBroadcast()) + .map(InterfaceAddress::getBroadcast) .filter(Objects::nonNull) .forEach(broadcastList::add); } diff --git a/udp/src/test/scala/docs/scaladsl/UdpSpec.scala b/udp/src/test/scala/docs/scaladsl/UdpSpec.scala index 8f58affa6..63bdfdfce 100644 --- a/udp/src/test/scala/docs/scaladsl/UdpSpec.scala +++ b/udp/src/test/scala/docs/scaladsl/UdpSpec.scala @@ -51,7 +51,7 @@ class UdpSpec private def msg(msg: String, destination: InetSocketAddress) = Datagram(ByteString(msg), destination) - override def afterAll() = + override def afterAll(): Unit = TestKit.shutdownActorSystem(system) "UDP stream" must { diff --git a/unix-domain-socket/src/main/mima-filters/1.1.x.backwards.excludes/UnixDomainSocket-more-specific-type.backwards.excludes b/unix-domain-socket/src/main/mima-filters/1.1.x.backwards.excludes/UnixDomainSocket-more-specific-type.backwards.excludes new file mode 100644 index 000000000..910d95a20 --- /dev/null +++ b/unix-domain-socket/src/main/mima-filters/1.1.x.backwards.excludes/UnixDomainSocket-more-specific-type.backwards.excludes @@ -0,0 +1,4 @@ +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.unixdomainsocket.javadsl.UnixDomainSocket.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.unixdomainsocket.javadsl.UnixDomainSocket.lookup") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.pekko.stream.connectors.unixdomainsocket.scaladsl.UnixDomainSocket.lookup") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.unixdomainsocket.scaladsl.UnixDomainSocket.lookup") diff --git 
a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala index 51b846ef3..791468be6 100644 --- a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala +++ b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala @@ -48,19 +48,19 @@ private[unixdomainsocket] object UnixDomainSocketImpl { private sealed abstract class ReceiveContext( val queue: SourceQueueWithComplete[ByteString], val buffer: ByteBuffer) - private case class ReceiveAvailable( + private final case class ReceiveAvailable( override val queue: SourceQueueWithComplete[ByteString], override val buffer: ByteBuffer) extends ReceiveContext(queue, buffer) - private case class PendingReceiveAck( + private final case class PendingReceiveAck( override val queue: SourceQueueWithComplete[ByteString], override val buffer: ByteBuffer, pendingResult: Future[QueueOfferResult]) extends ReceiveContext(queue, buffer) private sealed abstract class SendContext( val buffer: ByteBuffer) - private case class SendAvailable( + private final case class SendAvailable( override val buffer: ByteBuffer) extends SendContext(buffer) - private case class SendRequested( + private final case class SendRequested( override val buffer: ByteBuffer, sent: Promise[Done]) extends SendContext(buffer) private case object CloseRequested extends SendContext(ByteString.empty.asByteBuffer) @@ -91,7 +91,7 @@ private[unixdomainsocket] object UnixDomainSocketImpl { if (key != null) { // Observed as sometimes being null via sel.keys().iterator() if (log.isDebugEnabled) { val interestInfo = if (keySelectable) { - val interestSet = key.asInstanceOf[SelectionKey].interestOps() + val interestSet = key.interestOps() val isInterestedInAccept = (interestSet & SelectionKey.OP_ACCEPT) != 0 val isInterestedInConnect = (interestSet & SelectionKey.OP_CONNECT) != 0 @@ -99,9 +99,8 @@ private[unixdomainsocket] object UnixDomainSocketImpl { val isInterestedInWrite = (interestSet & SelectionKey.OP_WRITE) != 0 f"(accept=$isInterestedInAccept%5s connect=$isInterestedInConnect%5s read=$isInterestedInRead%5s write=$isInterestedInWrite%5s)" - } else { + } else "" - } log.debug( f"""ch=${key.channel().hashCode()}%10d @@ -312,17 +311,16 @@ private[unixdomainsocket] object UnixDomainSocketImpl { val sendSink = Sink.fromGraph( Flow[ByteString] .mapConcat { bytes => - if (bytes.size <= sendBufferSize) { + if (bytes.size <= sendBufferSize) Vector(bytes) - } else { + else { @annotation.tailrec def splitToBufferSize(bytes: ByteString, acc: Vector[ByteString]): Vector[ByteString] = if (bytes.nonEmpty) { val (left, right) = bytes.splitAt(sendBufferSize) splitToBufferSize(right, acc :+ left) - } else { + } else acc - } splitToBufferSize(bytes, Vector.empty) } } @@ -341,9 +339,9 @@ private[unixdomainsocket] object UnixDomainSocketImpl { .watchTermination() { case (_, done) => done.onComplete { _ => - sendReceiveContext.send = if (halfClose) { + sendReceiveContext.send = if (halfClose) ShutdownRequested - } else { + else { receiveQueue.complete() CloseRequested } diff --git a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/javadsl/UnixDomainSocket.scala b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/javadsl/UnixDomainSocket.scala 
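
The new `UnixDomainSocket-more-specific-type.backwards.excludes` entries above pair with the `lookup` changes in the next two hunks: the override's result type is narrowed from `ExtensionId[_ <: Extension]` to the concrete singleton type, which is legal covariant narrowing in Scala but shows up to MiMa as an incompatible result-type/signature change. A stripped-down illustration with stand-in types (not the actual Pekko classes):

trait Extension
trait ExtensionId[T <: Extension] {
  def lookup: ExtensionId[_ <: Extension]
}

final class SocketExtension extends Extension
object SocketExtension extends ExtensionId[SocketExtension] {
  // Narrower (singleton) result type than the inherited declaration; source-compatible,
  // but binary-compatibility checkers flag the changed generic signature.
  override def lookup: SocketExtension.type = SocketExtension
}
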
index 5ba10474d..20d095418 100644 --- a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/javadsl/UnixDomainSocket.scala +++ b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/javadsl/UnixDomainSocket.scala @@ -20,7 +20,7 @@ import java.util.concurrent.CompletionStage import org.apache.pekko import pekko.NotUsed -import pekko.actor.{ ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider } +import pekko.actor.{ ClassicActorSystemProvider, ExtendedActorSystem, ExtensionId, ExtensionIdProvider } import pekko.stream.javadsl.{ Flow, Source } import pekko.stream.Materializer import pekko.util.ByteString @@ -107,8 +107,7 @@ object UnixDomainSocket extends ExtensionId[UnixDomainSocket] with ExtensionIdPr */ override def get(system: ClassicActorSystemProvider): UnixDomainSocket = super.apply(system.classicSystem) - def lookup: ExtensionId[_ <: Extension] = - UnixDomainSocket + def lookup: UnixDomainSocket.type = UnixDomainSocket def createExtension(system: ExtendedActorSystem): UnixDomainSocket = new UnixDomainSocket(system) diff --git a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala index 5357bdaf5..f1efcdb73 100644 --- a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala +++ b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala @@ -18,7 +18,7 @@ import java.nio.file.Path import org.apache.pekko import pekko.NotUsed -import pekko.actor.{ ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider } +import pekko.actor.{ ClassicActorSystemProvider, ExtendedActorSystem, ExtensionId, ExtensionIdProvider } import pekko.stream._ import pekko.stream.connectors.unixdomainsocket.impl.UnixDomainSocketImpl import pekko.stream.scaladsl.{ Flow, Keep, Sink, Source } @@ -42,8 +42,7 @@ object UnixDomainSocket extends ExtensionId[UnixDomainSocket] with ExtensionIdPr override def createExtension(system: ExtendedActorSystem) = new UnixDomainSocket(system) - override def lookup: ExtensionId[_ <: Extension] = - UnixDomainSocket + override def lookup: UnixDomainSocket.type = UnixDomainSocket /** * * Represents a successful server binding. 
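
Much of the remaining churn in this patch is the same inspection applied over and over: public members get explicit type ascriptions (`override def toString: String`, `val Defaults: SolrUpdateSettings`, `override val shape: FlowShape[...]`) so that the published API is stated rather than inferred, and an implementation tweak can no longer change it silently. A condensed, purely hypothetical before/after:

final class RetrySettings private (val maxRetries: Int) { // hypothetical settings class
  override def toString: String = s"RetrySettings(maxRetries=$maxRetries)"
}

object RetrySettings {
  // With `val Defaults = new RetrySettings(3)` the public type is whatever gets inferred;
  // the ascription pins it down explicitly.
  val Defaults: RetrySettings = new RetrySettings(3)
}
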
diff --git a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Coalesce.scala b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Coalesce.scala index 7e0fb4496..efe11f77c 100644 --- a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Coalesce.scala +++ b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Coalesce.scala @@ -52,9 +52,8 @@ import pekko.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } isBuffering = false buffer.clear() emit(out, Characters(coalesced), () => emit(out, other, () => if (isClosed(in)) completeStage())) - } else { + } else push(out, other) - } } override def onUpstreamFinish(): Unit = diff --git a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/StreamingXmlParser.scala b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/StreamingXmlParser.scala index 05f12cf1f..612bc74e4 100644 --- a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/StreamingXmlParser.scala +++ b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/StreamingXmlParser.scala @@ -49,9 +49,8 @@ private[xml] object StreamingXmlParser { private val factory: AsyncXMLInputFactory = new InputFactoryImpl() configureFactory(factory) private val parser: AsyncXMLStreamReader[AsyncByteArrayFeeder] = factory.createAsyncForByteArray() - if (ignoreInvalidChars) { + if (ignoreInvalidChars) parser.getConfig.setIllegalCharHandler(new ReplacingIllegalCharHandler(0)) - } setHandlers(in, out, this) @@ -70,7 +69,7 @@ private[xml] object StreamingXmlParser { } @tailrec private def advanceParser(): Unit = - if (parser.hasNext) { + if (parser.hasNext) parser.next() match { case AsyncXMLStreamReader.EVENT_INCOMPLETE if isClosed(in) && !started => completeStage() case AsyncXMLStreamReader.EVENT_INCOMPLETE if isClosed(in) => failStage(withStreamingFinishedException) @@ -126,9 +125,9 @@ private[xml] object StreamingXmlParser { // Do not support DTD, SPACE, NAMESPACE, NOTATION_DECLARATION, ENTITY_DECLARATION, PROCESSING_INSTRUCTION // ATTRIBUTE is handled in START_ELEMENT implicitly - case x => + case _ => advanceParser() } - } else completeStage() + else completeStage() } } diff --git a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/StreamingXmlWriter.scala b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/StreamingXmlWriter.scala index 8a94c0b57..43ace5ef1 100644 --- a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/StreamingXmlWriter.scala +++ b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/StreamingXmlWriter.scala @@ -14,14 +14,14 @@ package org.apache.pekko.stream.connectors.xml.impl import java.nio.charset.Charset - import org.apache.pekko import pekko.annotation.InternalApi import pekko.stream.{ Attributes, FlowShape, Inlet, Outlet } import pekko.stream.connectors.xml._ import pekko.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } import pekko.util.{ ByteString, ByteStringBuilder } -import javax.xml.stream.XMLOutputFactory + +import javax.xml.stream.{ XMLOutputFactory, XMLStreamWriter } /** * INTERNAL API @@ -39,23 +39,21 @@ import javax.xml.stream.XMLOutputFactory new GraphStageLogic(shape) with InHandler with OutHandler { val byteStringBuilder = new ByteStringBuilder() - val output = xmlOutputFactory.createXMLStreamWriter(byteStringBuilder.asOutputStream, charset.name()) + val output: XMLStreamWriter = + xmlOutputFactory.createXMLStreamWriter(byteStringBuilder.asOutputStream, charset.name()) setHandlers(in, out, this) 
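
The `writeAttributes` hunk just below replaces `attributes.foreach { att => att match { ... } }` with a pattern-matching anonymous function, the idiomatic equivalent. The two shapes behave identically; the three-field `Attribute` here is a stand-in for the connector's type:

object PatternFunctionSketch extends App {
  final case class Attribute(name: String, value: String, prefix: Option[String] = None)

  val attributes = List(Attribute("id", "1"), Attribute("lang", "en", Some("xml")))

  // Lambda parameter plus an explicit match...
  attributes.foreach { att =>
    att match {
      case Attribute(name, value, Some(prefix)) => println(s"$prefix:$name=$value")
      case Attribute(name, value, None)         => println(s"$name=$value")
    }
  }

  // ...collapses into a pattern-matching anonymous function.
  attributes.foreach {
    case Attribute(name, value, Some(prefix)) => println(s"$prefix:$name=$value")
    case Attribute(name, value, None)         => println(s"$name=$value")
  }
}
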
      def writeAttributes(attributes: List[Attribute]): Unit =
-        attributes.foreach { att =>
-          att match {
-            case Attribute(name, value, Some(prefix), Some(namespace)) =>
-              output.writeAttribute(prefix, namespace, name, value)
-            case Attribute(name, value, None, Some(namespace)) =>
-              output.writeAttribute(namespace, name, value)
-            case Attribute(name, value, Some(_), None) =>
-              output.writeAttribute(name, value)
-            case Attribute(name, value, None, None) =>
-              output.writeAttribute(name, value)
-          }
-
+        attributes.foreach {
+          case Attribute(name, value, Some(prefix), Some(namespace)) =>
+            output.writeAttribute(prefix, namespace, name, value)
+          case Attribute(name, value, None, Some(namespace)) =>
+            output.writeAttribute(namespace, name, value)
+          case Attribute(name, value, Some(_), None) =>
+            output.writeAttribute(name, value)
+          case Attribute(name, value, None, None) =>
+            output.writeAttribute(name, value)
        }
 
      override def onPush(): Unit = {
@@ -113,9 +111,8 @@ import javax.xml.stream.XMLOutputFactory
      override def onUpstreamFinish(): Unit = {
        output.flush()
        val finalData = byteStringBuilder.result().compact
-        if (finalData.length != 0) {
+        if (finalData.nonEmpty)
          emit(out, finalData)
-        }
        super.onUpstreamFinish()
      }
    }
diff --git a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Subslice.scala b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Subslice.scala
index bdd78f8fd..c4197402a 100644
--- a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Subslice.scala
+++ b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Subslice.scala
@@ -62,42 +62,37 @@ import scala.collection.immutable
    if (path.isEmpty) setHandler(in, passThrough)
    else setHandler(in, partialMatch)
    setHandler(out, this)
 
-    lazy val partialMatch: InHandler = new InHandler {
-
-      override def onPush(): Unit = grab(in) match {
+    lazy val partialMatch: InHandler = () =>
+      grab(in) match {
        case StartElement(name, _, _, _, _) =>
          if (name == expected.head) {
            matchedSoFar = expected.head :: matchedSoFar
            expected = expected.tail
-            if (expected.isEmpty) {
+            if (expected.isEmpty)
              setHandler(in, passThrough)
-            }
-          } else {
+          } else
            setHandler(in, noMatch)
-          }
          pull(in)
-        case EndElement(name) =>
+        case EndElement(_) =>
          expected = matchedSoFar.head :: expected
          matchedSoFar = matchedSoFar.tail
          pull(in)
-        case other =>
+        case _ =>
          pull(in)
      }
-    }
-
    lazy val noMatch: InHandler = new InHandler {
      var depth = 0
 
      override def onPush(): Unit = grab(in) match {
-        case start: StartElement =>
+        case _: StartElement =>
          depth += 1
          pull(in)
-        case end: EndElement =>
+        case _: EndElement =>
          if (depth == 0) setHandler(in, partialMatch)
          else depth -= 1
          pull(in)
-        case other =>
+        case _ =>
          pull(in)
      }
    }
diff --git a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Subtree.scala b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Subtree.scala
index 1e64b2485..efca66c46 100644
--- a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Subtree.scala
+++ b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/impl/Subtree.scala
@@ -54,9 +54,8 @@ import scala.collection.immutable
 
    override def onPull(): Unit = pull(in)
 
-    val matching: InHandler = new InHandler {
-
-      override def onPush(): Unit = grab(in) match {
+    val matching: InHandler = () =>
+      grab(in) match {
        case start: StartElement =>
          val element = createElement(start)
          elementStack.headOption.foreach { head =>
@@ -86,17 +85,15 @@ import scala.collection.immutable
            element.appendChild(doc.createTextNode(text.text))
          }
          pull(in)
-        case other =>
+        case _ =>
          pull(in)
      }
-    }
    if (path.isEmpty) setHandler(in, matching)
    else setHandler(in, partialMatch)
    setHandler(out, this)
 
-    lazy val partialMatch: InHandler = new InHandler {
-
-      override def onPush(): Unit = grab(in) match {
+    lazy val partialMatch: InHandler = () =>
+      grab(in) match {
        case start: StartElement =>
          if (start.localName == expected.head) {
            matchedSoFar = expected.head :: matchedSoFar
@@ -114,12 +111,10 @@ import scala.collection.immutable
          expected = matchedSoFar.head :: expected
          matchedSoFar = matchedSoFar.tail
          pull(in)
-        case other =>
+        case _ =>
          pull(in)
      }
 
-    }
-
    lazy val noMatch: InHandler = new InHandler {
      var depth = 0
 
@@ -131,7 +126,7 @@ import scala.collection.immutable
          if (depth == 0) setHandler(in, partialMatch)
          else depth -= 1
          pull(in)
-        case other =>
+        case _ =>
          pull(in)
      }
    }
diff --git a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/javadsl/XmlParsing.scala b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/javadsl/XmlParsing.scala
index 685695707..013e4c08e 100644
--- a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/javadsl/XmlParsing.scala
+++ b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/javadsl/XmlParsing.scala
@@ -43,7 +43,7 @@
   */
  def parser(
      configureFactory: Consumer[AsyncXMLInputFactory]): pekko.stream.javadsl.Flow[ByteString, ParseEvent, NotUsed] =
-    xml.scaladsl.XmlParsing.parser(false, configureFactory.accept(_)).asJava
+    xml.scaladsl.XmlParsing.parser(false, configureFactory.accept).asJava
 
  /**
   * Parser Flow that takes a stream of ByteStrings and parses them to XML events similar to SAX.
@@ -51,7 +51,7 @@
  def parser(
      ignoreInvalidChars: Boolean,
      configureFactory: Consumer[AsyncXMLInputFactory]): pekko.stream.javadsl.Flow[ByteString, ParseEvent, NotUsed] =
-    xml.scaladsl.XmlParsing.parser(ignoreInvalidChars, configureFactory.accept(_)).asJava
+    xml.scaladsl.XmlParsing.parser(ignoreInvalidChars, configureFactory.accept).asJava
 
  /**
   * A Flow that transforms a stream of XML ParseEvents. This stage coalesces consequitive CData and Characters
diff --git a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/model.scala b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/model.scala
index cc06d51c2..c1b8386cd 100644
--- a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/model.scala
+++ b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/model.scala
@@ -62,7 +62,7 @@ object Namespace {
  /**
   * Java API
   */
-  def create(uri: String, prefix: Optional[String]) =
+  def create(uri: String, prefix: Optional[String]): Namespace =
    Namespace(uri, prefix.toScala)
 }
 
@@ -84,13 +84,13 @@ object Attribute {
  /**
   * Java API
   */
-  def create(name: String, value: String, prefix: Optional[String], namespace: Optional[String]) =
+  def create(name: String, value: String, prefix: Optional[String], namespace: Optional[String]): Attribute =
    Attribute(name, value, prefix.toScala, namespace.toScala)
 
  /**
   * Java API
   */
-  def create(name: String, value: String) = Attribute(name, value)
+  def create(name: String, value: String): Attribute = Attribute(name, value)
 }
 
 final case class StartElement(localName: String,
@@ -183,7 +183,7 @@ object EndElement {
  /**
   * Java API
   */
-  def create(localName: String) =
+  def create(localName: String): EndElement =
    EndElement(localName)
 }
 
@@ -196,7 +196,7 @@ object Characters {
  /**
   * Java API
   */
-  def create(text: String) =
+  def create(text: String): Characters =
    Characters(text)
 }
 
@@ -216,7 +216,7 @@ object ProcessingInstruction {
  /**
   * Java API
   */
-  def create(target: Optional[String], data: Optional[String]) =
+  def create(target: Optional[String], data: Optional[String]): ProcessingInstruction =
    ProcessingInstruction(target.toScala, data.toScala)
 }
 
@@ -229,7 +229,7 @@ object Comment {
  /**
   * Java API
   */
-  def create(text: String) =
+  def create(text: String): Comment =
    Comment(text)
 }
 
@@ -242,6 +242,6 @@ object CData {
  /**
   * Java API
   */
-  def create(text: String) =
+  def create(text: String): CData =
    CData(text)
 }
diff --git a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/scaladsl/XmlParsing.scala b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/scaladsl/XmlParsing.scala
index 2cf101363..23c87bc6d 100644
--- a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/scaladsl/XmlParsing.scala
+++ b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/scaladsl/XmlParsing.scala
@@ -35,9 +35,8 @@ object XmlParsing {
  val configureDefault: AsyncXMLInputFactory => Unit = { factory =>
    factory.setProperty(XMLInputFactory.SUPPORT_DTD, false)
    factory.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false)
-    if (factory.isPropertySupported(XMLConstants.FEATURE_SECURE_PROCESSING)) {
+    if (factory.isPropertySupported(XMLConstants.FEATURE_SECURE_PROCESSING))
      factory.setProperty(XMLConstants.FEATURE_SECURE_PROCESSING, true)
-    }
  }
 
  /**
diff --git a/xml/src/test/java/docs/javadsl/XmlParsingTest.java b/xml/src/test/java/docs/javadsl/XmlParsingTest.java
index f4a305792..25928f326 100644
--- a/xml/src/test/java/docs/javadsl/XmlParsingTest.java
+++ b/xml/src/test/java/docs/javadsl/XmlParsingTest.java
@@ -74,8 +74,7 @@ public void xmlParser() throws InterruptedException, ExecutionException, Timeout
 
    resultStage
        .thenAccept(
-            (list) -> {
-              assertThat(
+            (list) -> assertThat(
                  list,
                  hasItems(
                      StartDocument.getInstance(),
@@ -87,8 +86,7 @@ public void xmlParser() throws InterruptedException, ExecutionException, Timeout
                      Characters.create("elem2"),
                      EndElement.create("elem"),
                      EndElement.create("doc"),
-                      EndDocument.getInstance()));
-            })
+                      EndDocument.getInstance())))
        .toCompletableFuture()
        .get(5, TimeUnit.SECONDS);
  }
@@ -108,13 +106,11 @@ public void parseAndReadEvents() throws Exception {
                    return Pair.create(textBuffer, Optional.empty());
                  case XMLEndElement:
                    EndElement s = (EndElement) parseEvent;
-                    switch (s.localName()) {
-                      case "elem":
-                        String text = textBuffer.toString();
-                        return Pair.create(textBuffer, Optional.of(text));
-                      default:
-                        return Pair.create(textBuffer, Optional.empty());
+                    if (s.localName().equals("elem")) {
+                      String text = textBuffer.toString();
+                      return Pair.create(textBuffer, Optional.of(text));
                    }
+                    return Pair.create(textBuffer, Optional.empty());
                  case XMLCharacters:
                  case XMLCData:
                    TextEvent t = (TextEvent) parseEvent;
@@ -148,8 +144,7 @@ public void xmlParserConfigured()
 
    resultStage
        .thenAccept(
-            (list) -> {
-              assertThat(
+            (list) -> assertThat(
                  list,
                  hasItems(
                      StartDocument.getInstance(),
@@ -161,8 +156,7 @@ public void xmlParserConfigured()
                      Characters.create("elem2"),
                      EndElement.create("elem"),
                      EndElement.create("doc"),
-                      EndDocument.getInstance()));
-            })
+                      EndDocument.getInstance())))
        .toCompletableFuture()
        .get(5, TimeUnit.SECONDS);
    assertThat(configWasCalled[0], is(true));
@@ -194,16 +188,14 @@ public void xmlSubslice() throws InterruptedException, ExecutionException, Timeo
 
    resultStage
        .thenAccept(
-            (list) -> {
-              assertThat(
+            (list) -> assertThat(
                  list,
                  hasItems(
                      Characters.create("i1"),
                      StartElement.create("sub", Collections.emptyMap()),
                      Characters.create("i2"),
                      EndElement.create("sub"),
-                      Characters.create("i3")));
-            })
+                      Characters.create("i3"))))
        .toCompletableFuture()
        .get(5, TimeUnit.SECONDS);
  }
@@ -234,11 +226,9 @@ public void xmlSubtree() throws InterruptedException, ExecutionException, Timeou
 
    resultStage
        .thenAccept(
-            (list) -> {
-              assertThat(
+            (list) -> assertThat(
                  list.stream().map(e -> XmlHelper.asString(e).trim()).collect(Collectors.toList()),
-                  hasItems("i1", "i2", "i3"));
-            })
+                  hasItems("i1", "i2", "i3")))
        .toCompletableFuture()
        .get(5, TimeUnit.SECONDS);
  }