diff --git a/checkstyle.xml b/checkstyle.xml new file mode 100644 index 0000000000..3210850bcb --- /dev/null +++ b/checkstyle.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + diff --git a/pom.xml b/pom.xml index 05e6bbdb66..ca2fb6c39c 100644 --- a/pom.xml +++ b/pom.xml @@ -419,9 +419,56 @@ org.apache.maven.plugins maven-checkstyle-plugin - - true - + + + com.puppycrawl.tools + checkstyle + 8.18 + + + + + + validate + none + + true + + + + test-compile + test-compile + + UTF-8 + true + true + true + false + false + true + io/confluent/examples/streams/avro/** + checkstyle.xml + + + check + + + diff --git a/src/main/java/io/confluent/examples/streams/GlobalKTablesExample.java b/src/main/java/io/confluent/examples/streams/GlobalKTablesExample.java index a0bbe1862b..3613ed3393 100644 --- a/src/main/java/io/confluent/examples/streams/GlobalKTablesExample.java +++ b/src/main/java/io/confluent/examples/streams/GlobalKTablesExample.java @@ -99,7 +99,7 @@ public class GlobalKTablesExample { static final String PRODUCT_STORE = "product-store"; static final String ENRICHED_ORDER_TOPIC = "enriched-order"; - public static void main(String[] args) { + public static void main(final String[] args) { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? args[1] : "http://localhost:8081"; final KafkaStreams diff --git a/src/main/java/io/confluent/examples/streams/GlobalKTablesExampleDriver.java b/src/main/java/io/confluent/examples/streams/GlobalKTablesExampleDriver.java index 42ab05a56a..3345d357eb 100644 --- a/src/main/java/io/confluent/examples/streams/GlobalKTablesExampleDriver.java +++ b/src/main/java/io/confluent/examples/streams/GlobalKTablesExampleDriver.java @@ -64,7 +64,7 @@ public class GlobalKTablesExampleDriver { private static final Random RANDOM = new Random(); private static final int RECORDS_TO_GENERATE = 100; - public static void main(String[] args) { + public static void main(final String[] args) { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? 
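Note: the new checkstyle.xml arrives empty in this rendering (its module list was stripped), and the pom.xml hunk has lost its XML element names, so the exact ruleset cannot be recovered here. Judging by the rest of the diff, which adds `final` to parameters and local variables throughout, the configuration presumably enables checks along the lines of FinalParameters and FinalLocalVariable. A minimal sketch of what such checks flag:

```java
// Hypothetical illustration only: the concrete Checkstyle modules are an assumption,
// since the contents of checkstyle.xml are not visible in this diff.
public class FinalStyleSketch {

  // Before: flagged by a FinalParameters-style check (parameter never reassigned)
  // and a FinalLocalVariable-style check (local never reassigned).
  static int lengthOf(String input) {
    int length = input.length();
    return length;
  }

  // After: the shape this PR rewrites every example class into.
  static int lengthOfCompliant(final String input) {
    final int length = input.length();
    return length;
  }
}
```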
args[1] : "http://localhost:8081"; generateCustomers(bootstrapServers, schemaRegistryUrl, RECORDS_TO_GENERATE); @@ -87,7 +87,7 @@ private static void receiveEnrichedOrders(final String bootstrapServers, final KafkaConsumer consumer = new KafkaConsumer<>(consumerProps); consumer.subscribe(Collections.singleton(ENRICHED_ORDER_TOPIC)); - int received = 0; + final int received = 0; while(received < expected) { final ConsumerRecords records = consumer.poll(Long.MAX_VALUE); records.forEach(record -> System.out.println(record.value())); @@ -183,7 +183,7 @@ private static SpecificAvroSerde createSerde(fin } // Copied from org.apache.kafka.test.TestUtils - private static String randomString(int len) { + private static String randomString(final int len) { final StringBuilder b = new StringBuilder(); for(int i = 0; i < len; ++i) { diff --git a/src/main/java/io/confluent/examples/streams/PageViewRegionExampleDriver.java b/src/main/java/io/confluent/examples/streams/PageViewRegionExampleDriver.java index 1769d68451..707817c526 100644 --- a/src/main/java/io/confluent/examples/streams/PageViewRegionExampleDriver.java +++ b/src/main/java/io/confluent/examples/streams/PageViewRegionExampleDriver.java @@ -62,7 +62,7 @@ public static void main(final String[] args) throws IOException { consumeOutput(bootstrapServers); } - private static void produceInputs(String bootstrapServers, String schemaRegistryUrl) throws IOException { + private static void produceInputs(final String bootstrapServers, final String schemaRegistryUrl) throws IOException { final String[] users = {"erica", "bob", "joe", "damian", "tania", "phil", "sam", "lauren", "joseph"}; final String[] regions = {"europe", "usa", "asia", "africa"}; @@ -98,7 +98,7 @@ private static void produceInputs(String bootstrapServers, String schemaRegistry } } - private static void consumeOutput(String bootstrapServers) { + private static void consumeOutput(final String bootstrapServers) { final String resultTopic = "PageViewsByRegion"; final Properties consumerProperties = new Properties(); consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); @@ -119,11 +119,16 @@ private static void consumeOutput(String bootstrapServers) { } } - private static Schema loadSchema(final String name) throws IOException { - try (InputStream input = PageViewRegionLambdaExample.class.getClassLoader() - .getResourceAsStream("avro/io/confluent/examples/streams/" + name)) { - return new Schema.Parser().parse(input); - } + private static Schema loadSchema(final String name) throws IOException { + try ( + final InputStream input = + PageViewRegionLambdaExample + .class + .getClassLoader() + .getResourceAsStream("avro/io/confluent/examples/streams/" + name) + ) { + return new Schema.Parser().parse(input); + } } } diff --git a/src/main/java/io/confluent/examples/streams/PageViewRegionLambdaExample.java b/src/main/java/io/confluent/examples/streams/PageViewRegionLambdaExample.java index 80f0b020dd..3375a83f28 100644 --- a/src/main/java/io/confluent/examples/streams/PageViewRegionLambdaExample.java +++ b/src/main/java/io/confluent/examples/streams/PageViewRegionLambdaExample.java @@ -171,7 +171,7 @@ public static void main(final String[] args) throws Exception { final KTable, Long> viewsByRegion = viewsByUser .leftJoin(userRegions, (view, region) -> { - GenericRecord viewRegion = new GenericData.Record(schema); + final GenericRecord viewRegion = new GenericData.Record(schema); viewRegion.put("user", view.get("user")); viewRegion.put("page", view.get("page")); 
viewRegion.put("region", region); diff --git a/src/main/java/io/confluent/examples/streams/SessionWindowsExample.java b/src/main/java/io/confluent/examples/streams/SessionWindowsExample.java index 64d493a92f..f41cc655c4 100644 --- a/src/main/java/io/confluent/examples/streams/SessionWindowsExample.java +++ b/src/main/java/io/confluent/examples/streams/SessionWindowsExample.java @@ -109,7 +109,7 @@ public class SessionWindowsExample { static final Long INACTIVITY_GAP = TimeUnit.MINUTES.toMillis(30); static final String PLAY_EVENTS_PER_SESSION = "play-events-per-session"; - public static void main(String[] args) { + public static void main(final String[] args) { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? args[1] : "http://localhost:8081"; final KafkaStreams streams = createStreams(bootstrapServers, diff --git a/src/main/java/io/confluent/examples/streams/SessionWindowsExampleDriver.java b/src/main/java/io/confluent/examples/streams/SessionWindowsExampleDriver.java index 54734f34d0..d5e38b11a9 100644 --- a/src/main/java/io/confluent/examples/streams/SessionWindowsExampleDriver.java +++ b/src/main/java/io/confluent/examples/streams/SessionWindowsExampleDriver.java @@ -49,7 +49,7 @@ public class SessionWindowsExampleDriver { public static final int NUM_RECORDS_SENT = 8; - public static void main(String[] args) { + public static void main(final String[] args) { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? args[1] : "http://localhost:8081"; producePlayEvents(bootstrapServers, schemaRegistryUrl); diff --git a/src/main/java/io/confluent/examples/streams/SumLambdaExampleDriver.java b/src/main/java/io/confluent/examples/streams/SumLambdaExampleDriver.java index c42c4c4d67..65c9f81f49 100644 --- a/src/main/java/io/confluent/examples/streams/SumLambdaExampleDriver.java +++ b/src/main/java/io/confluent/examples/streams/SumLambdaExampleDriver.java @@ -56,7 +56,7 @@ public static void main(final String[] args) throws Exception { consumeOutput(bootstrapServers); } - private static void consumeOutput(String bootstrapServers) { + private static void consumeOutput(final String bootstrapServers) { final Properties properties = new Properties(); properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class); @@ -75,7 +75,7 @@ private static void consumeOutput(String bootstrapServers) { } } - private static void produceInput(String bootstrapServers) { + private static void produceInput(final String bootstrapServers) { final Properties props = new Properties(); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class); diff --git a/src/main/java/io/confluent/examples/streams/TopArticlesExampleDriver.java b/src/main/java/io/confluent/examples/streams/TopArticlesExampleDriver.java index 59160bae82..3c4506c409 100644 --- a/src/main/java/io/confluent/examples/streams/TopArticlesExampleDriver.java +++ b/src/main/java/io/confluent/examples/streams/TopArticlesExampleDriver.java @@ -59,14 +59,15 @@ */ public class TopArticlesExampleDriver { - public static void main(String[] args) throws IOException { + public static void main(final String[] args) throws IOException { final String bootstrapServers = args.length > 0 ? 
args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? args[1] : "http://localhost:8081"; produceInputs(bootstrapServers, schemaRegistryUrl); consumeOutput(bootstrapServers, schemaRegistryUrl); } - private static void produceInputs(String bootstrapServers, String schemaRegistryUrl) throws IOException { + private static void produceInputs(final String bootstrapServers, + final String schemaRegistryUrl) throws IOException { final String[] users = {"erica", "bob", "joe", "damian", "tania", "phil", "sam", "lauren", "joseph"}; final String[] industries = {"engineering", "telco", "finance", "health", "science"}; @@ -85,7 +86,7 @@ private static void produceInputs(String bootstrapServers, String schemaRegistry new GenericRecordBuilder(loadSchema("pageview.avsc")); final Random random = new Random(); - for (String user : users) { + for (final String user : users) { pageViewBuilder.set("industry", industries[random.nextInt(industries.length)]); pageViewBuilder.set("flags", "ARTICLE"); // For each user generate some page views @@ -101,7 +102,7 @@ record -> producer.send(new ProducerRecord<>(TopArticlesLambdaExample.PAGE_VIEWS producer.flush(); } - private static void consumeOutput(String bootstrapServers, String schemaRegistryUrl) { + private static void consumeOutput(final String bootstrapServers, final String schemaRegistryUrl) { final Properties consumerProperties = new Properties(); consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); @@ -118,16 +119,21 @@ private static void consumeOutput(String bootstrapServers, String schemaRegistry consumer.subscribe(Collections.singleton(TopArticlesLambdaExample.TOP_NEWS_PER_INDUSTRY_TOPIC)); while (true) { - ConsumerRecords, String> consumerRecords = consumer.poll(Long.MAX_VALUE); - for (ConsumerRecord, String> consumerRecord : consumerRecords) { + final ConsumerRecords, String> consumerRecords = consumer.poll(Long.MAX_VALUE); + for (final ConsumerRecord, String> consumerRecord : consumerRecords) { System.out.println(consumerRecord.key().key() + "@" + consumerRecord.key().window().start() + "=" + consumerRecord.value()); } } } - static Schema loadSchema(String name) throws IOException { - try (InputStream input = TopArticlesLambdaExample.class.getClassLoader() - .getResourceAsStream("avro/io/confluent/examples/streams/" + name)) { + static Schema loadSchema(final String name) throws IOException { + try ( + final InputStream input = + TopArticlesLambdaExample + .class + .getClassLoader() + .getResourceAsStream("avro/io/confluent/examples/streams/" + name) + ) { return new Schema.Parser().parse(input); } } diff --git a/src/main/java/io/confluent/examples/streams/TopArticlesLambdaExample.java b/src/main/java/io/confluent/examples/streams/TopArticlesLambdaExample.java index 90d0f0653e..6ff7965a8e 100644 --- a/src/main/java/io/confluent/examples/streams/TopArticlesLambdaExample.java +++ b/src/main/java/io/confluent/examples/streams/TopArticlesLambdaExample.java @@ -201,11 +201,11 @@ static KafkaStreams buildTopArticlesStream(final String bootstrapServers, // the selector (windowedArticle, count) -> { // project on the industry field for key - Windowed windowedIndustry = + final Windowed windowedIndustry = new Windowed<>(windowedArticle.key().get("industry").toString(), windowedArticle.window()); // add the page into the value - GenericRecord viewStats = new GenericData.Record(schema); + final GenericRecord 
viewStats = new GenericData.Record(schema); viewStats.put("page", windowedArticle.key().get("page")); viewStats.put("user", "user"); viewStats.put("industry", windowedArticle.key().get("industry")); diff --git a/src/main/java/io/confluent/examples/streams/WikipediaFeedAvroExampleDriver.java b/src/main/java/io/confluent/examples/streams/WikipediaFeedAvroExampleDriver.java index 9c58fecfc1..9c4cc899a3 100644 --- a/src/main/java/io/confluent/examples/streams/WikipediaFeedAvroExampleDriver.java +++ b/src/main/java/io/confluent/examples/streams/WikipediaFeedAvroExampleDriver.java @@ -60,7 +60,8 @@ public static void main(final String[] args) throws IOException { consumeOutput(bootstrapServers, schemaRegistryUrl); } - private static void produceInputs(String bootstrapServers, String schemaRegistryUrl) throws IOException { + private static void produceInputs(final String bootstrapServers, + final String schemaRegistryUrl) { final String[] users = {"erica", "bob", "joe", "damian", "tania", "phil", "sam", "lauren", "joseph"}; @@ -83,7 +84,7 @@ record -> producer.send(new ProducerRecord<>(WikipediaFeedAvroExample.WIKIPEDIA_ producer.flush(); } - private static void consumeOutput(String bootstrapServers, String schemaRegistryUrl) { + private static void consumeOutput(final String bootstrapServers, final String schemaRegistryUrl) { final Properties consumerProperties = new Properties(); consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); diff --git a/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesDriver.java b/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesDriver.java index ff6b1528b1..1a05031826 100644 --- a/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesDriver.java +++ b/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesDriver.java @@ -44,7 +44,7 @@ */ public class WordCountInteractiveQueriesDriver { - public static void main(String [] args) throws Exception { + public static void main(final String [] args) throws Exception { final String bootstrapServers = args.length > 0 ? 
args[0] : "localhost:9092"; final List inputValues = Arrays.asList("hello world", "all streams lead to kafka", @@ -60,7 +60,7 @@ public static void main(String [] args) throws Exception { "one jolly sailor", "king of the world"); - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); diff --git a/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesExample.java b/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesExample.java index 1c12183bac..3ef8866ac6 100644 --- a/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesExample.java +++ b/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesExample.java @@ -146,14 +146,14 @@ public class WordCountInteractiveQueriesExample { static final String TEXT_LINES_TOPIC = "TextLinesTopic"; static final String DEFAULT_HOST = "localhost"; - public static void main(String[] args) throws Exception { + public static void main(final String[] args) throws Exception { if (args.length == 0 || args.length > 2) { throw new IllegalArgumentException("usage: ... [ (optional)]"); } final int port = Integer.valueOf(args[0]); final String bootstrapServers = args.length > 1 ? args[1] : "localhost:9092"; - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); // Give the Streams application a unique name. The name must be unique in the Kafka cluster // against which the application is run. streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "interactive-queries-example"); @@ -195,7 +195,7 @@ public static void main(String[] args) throws Exception { try { streams.close(); restService.stop(); - } catch (Exception e) { + } catch (final Exception e) { // ignored } })); @@ -213,8 +213,8 @@ static WordCountInteractiveQueriesRestService startRestProxy(final KafkaStreams static KafkaStreams createStreams(final Properties streamsConfiguration) { final Serde stringSerde = Serdes.String(); - StreamsBuilder builder = new StreamsBuilder(); - KStream + final StreamsBuilder builder = new StreamsBuilder(); + final KStream textLines = builder.stream(TEXT_LINES_TOPIC, Consumed.with(Serdes.String(), Serdes.String())); final KGroupedStream groupedByWord = textLines diff --git a/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesRestService.java b/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesRestService.java index 256a94f102..d469c09ce8 100644 --- a/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesRestService.java +++ b/src/main/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesRestService.java @@ -55,7 +55,7 @@ public class WordCountInteractiveQueriesRestService { private final KafkaStreams streams; private final MetadataService metadataService; private Server jettyServer; - private HostInfo hostInfo; + private final HostInfo hostInfo; private final Client client = ClientBuilder.newBuilder().register(JacksonFeature.class).build(); WordCountInteractiveQueriesRestService(final KafkaStreams streams, @@ -78,13 +78,13 @@ public class WordCountInteractiveQueriesRestService { public KeyValueBean 
byKey(@PathParam("storeName") final String storeName, @PathParam("key") final String key) { - HostStoreInfo hostStoreInfo = streamsMetadataForStoreAndKey(storeName, key); + final HostStoreInfo hostStoreInfo = streamsMetadataForStoreAndKey(storeName, key); if (!thisHost(hostStoreInfo)){ return fetchByKey(hostStoreInfo, "state/keyvalue/"+storeName+"/"+key); } // Lookup the KeyValueStore with the provided storeName - final ReadOnlyKeyValueStore store = streams.store(storeName, QueryableStoreTypes.keyValueStore()); + final ReadOnlyKeyValueStore store = streams.store(storeName, QueryableStoreTypes.keyValueStore()); if (store == null) { throw new NotFoundException(); } @@ -154,7 +154,7 @@ public List windowedByKey(@PathParam("storeName") final String sto // Lookup the WindowStore with the provided storeName final ReadOnlyWindowStore store = streams.store(storeName, - QueryableStoreTypes.windowStore()); + QueryableStoreTypes.windowStore()); if (store == null) { throw new NotFoundException(); } @@ -191,7 +191,7 @@ public List streamsMetadata() { @GET() @Path("/instances/{storeName}") @Produces(MediaType.APPLICATION_JSON) - public List streamsMetadataForStore(@PathParam("storeName") String store) { + public List streamsMetadataForStore(@PathParam("storeName") final String store) { return metadataService.streamsMetadataForStore(store); } @@ -205,8 +205,8 @@ public List streamsMetadataForStore(@PathParam("storeName") Strin @GET() @Path("/instance/{storeName}/{key}") @Produces(MediaType.APPLICATION_JSON) - public HostStoreInfo streamsMetadataForStoreAndKey(@PathParam("storeName") String store, - @PathParam("key") String key) { + public HostStoreInfo streamsMetadataForStoreAndKey(@PathParam("storeName") final String store, + @PathParam("key") final String key) { return metadataService.streamsMetadataForStoreAndKey(store, key, new StringSerializer()); } @@ -244,21 +244,21 @@ private boolean thisHost(final HostStoreInfo host) { /** * Start an embedded Jetty Server on the given port * @param port port to run the Server on - * @throws Exception + * @throws Exception if jetty can't start */ void start(final int port) throws Exception { - ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); + final ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); context.setContextPath("/"); jettyServer = new Server(port); jettyServer.setHandler(context); - ResourceConfig rc = new ResourceConfig(); + final ResourceConfig rc = new ResourceConfig(); rc.register(this); rc.register(JacksonFeature.class); - ServletContainer sc = new ServletContainer(rc); - ServletHolder holder = new ServletHolder(sc); + final ServletContainer sc = new ServletContainer(rc); + final ServletHolder holder = new ServletHolder(sc); context.addServlet(holder, "/*"); jettyServer.start(); @@ -266,7 +266,7 @@ void start(final int port) throws Exception { /** * Stop the Jetty Server - * @throws Exception + * @throws Exception if jetty can't stop */ void stop() throws Exception { if (jettyServer != null) { diff --git a/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExample.java b/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExample.java index a0f45f6b04..397da69530 100644 --- a/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExample.java +++ b/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExample.java @@ -180,7 +180,7 @@ public 
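Note: the byKey endpoint resolves which instance owns the key, proxies when necessary, and otherwise reads the local state store. The local half of that lookup, with the stripped generics restored as assumptions:

```java
import javax.ws.rs.NotFoundException;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

final class LocalStoreLookupSketch {
  static Long countForWord(final KafkaStreams streams, final String storeName, final String key) {
    final ReadOnlyKeyValueStore<String, Long> store =
        streams.store(storeName, QueryableStoreTypes.keyValueStore());
    if (store == null) {
      throw new NotFoundException();        // store not hosted on this instance
    }
    final Long value = store.get(key);
    if (value == null) {
      throw new NotFoundException();        // key not present in the local store
    }
    return value;
  }
}
```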
class KafkaMusicExample { private static final String DEFAULT_BOOTSTRAP_SERVERS = "localhost:9092"; private static final String DEFAULT_SCHEMA_REGISTRY_URL = "http://localhost:8081"; - public static void main(String[] args) throws Exception { + public static void main(final String[] args) throws Exception { if (args.length == 0 || args.length > 4) { throw new IllegalArgumentException("usage: ... " + "[ (optional, default: " + DEFAULT_BOOTSTRAP_SERVERS + ")] " + @@ -227,7 +227,7 @@ public static void main(String[] args) throws Exception { try { restService.stop(); streams.close(); - } catch (Exception e) { + } catch (final Exception e) { // ignored } })); @@ -266,14 +266,14 @@ static KafkaStreams createChartsStreams(final String bootstrapServers, // situations where the input topic was not pre-created before running the application because // the application will discover a newly created topic faster. In production, you would // typically not change this parameter from its default. - String metadataMaxAgeMs = System.getProperty(ConsumerConfig.METADATA_MAX_AGE_CONFIG); + final String metadataMaxAgeMs = System.getProperty(ConsumerConfig.METADATA_MAX_AGE_CONFIG); if (metadataMaxAgeMs != null) { try { - int value = Integer.parseInt(metadataMaxAgeMs); + final int value = Integer.parseInt(metadataMaxAgeMs); streamsConfiguration.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, value); System.out.println("Set consumer configuration " + ConsumerConfig.METADATA_MAX_AGE_CONFIG + " to " + value); - } catch (NumberFormatException ignored) { + } catch (final NumberFormatException ignored) { } } @@ -407,12 +407,12 @@ public byte[] serialize(final String s, final TopFiveSongs topFiveSongs) { dataOutputStream = new DataOutputStream(out); try { - for (SongPlayCount songPlayCount : topFiveSongs) { + for (final SongPlayCount songPlayCount : topFiveSongs) { dataOutputStream.writeLong(songPlayCount.getSongId()); dataOutputStream.writeLong(songPlayCount.getPlays()); } dataOutputStream.flush(); - } catch (IOException e) { + } catch (final IOException e) { throw new RuntimeException(e); } return out.toByteArray(); @@ -449,7 +449,7 @@ public TopFiveSongs deserialize(final String s, final byte[] bytes) { result.add(new SongPlayCount(dataInputStream.readLong(), dataInputStream.readLong())); } - } catch (IOException e) { + } catch (final IOException e) { throw new RuntimeException(e); } return result; diff --git a/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExampleDriver.java b/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExampleDriver.java index 31f2b86965..426d22aedb 100644 --- a/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExampleDriver.java +++ b/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExampleDriver.java @@ -60,7 +60,7 @@ */ public class KafkaMusicExampleDriver { - public static void main(String [] args) throws Exception { + public static void main(final String [] args) throws Exception { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? 
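Note: KafkaMusicExample ships a hand-rolled Serde for the top-five chart, writing each song as a pair of longs via DataOutputStream and reading them back with DataInputStream. A sketch of the serializer half, with long[] standing in for the real TopFiveSongs type:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

final class LongPairsSerializer implements Serializer<long[]> {

  @Override
  public void configure(final Map<String, ?> configs, final boolean isKey) { }

  @Override
  public byte[] serialize(final String topic, final long[] songIdAndPlayPairs) {
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    final DataOutputStream dataOut = new DataOutputStream(out);
    try {
      for (final long value : songIdAndPlayPairs) {
        dataOut.writeLong(value);            // fixed-width encoding, no schema needed
      }
      dataOut.flush();
    } catch (final IOException e) {
      throw new RuntimeException(e);
    }
    return out.toByteArray();
  }

  @Override
  public void close() { }
}
```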
args[1] : "http://localhost:8081"; System.out.println("Connecting to Kafka cluster via bootstrap servers " + bootstrapServers); diff --git a/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/MusicPlaysRestService.java b/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/MusicPlaysRestService.java index 67593ca2f7..cd0b3405d1 100644 --- a/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/MusicPlaysRestService.java +++ b/src/main/java/io/confluent/examples/streams/interactivequeries/kafkamusic/MusicPlaysRestService.java @@ -167,7 +167,7 @@ private List topFiveSongs(final String key, @GET() @Path("/song/{id}") @Produces(MediaType.APPLICATION_JSON) - public SongBean song(@PathParam("id") Long songId) { + public SongBean song(@PathParam("id") final Long songId) { final ReadOnlyKeyValueStore songStore = streams.store(KafkaMusicExample.ALL_SONGS, QueryableStoreTypes.keyValueStore()); final Song song = songStore.get(songId); @@ -198,7 +198,7 @@ public List streamsMetadata() { @GET() @Path("/instances/{storeName}") @Produces(MediaType.APPLICATION_JSON) - public List streamsMetadataForStore(@PathParam("storeName") String store) { + public List streamsMetadataForStore(@PathParam("storeName") final String store) { return metadataService.streamsMetadataForStore(store); } @@ -207,18 +207,18 @@ public List streamsMetadataForStore(@PathParam("storeName") Strin * @throws Exception */ void start() throws Exception { - ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); + final ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); context.setContextPath("/"); jettyServer = new Server(hostInfo.port()); jettyServer.setHandler(context); - ResourceConfig rc = new ResourceConfig(); + final ResourceConfig rc = new ResourceConfig(); rc.register(this); rc.register(JacksonFeature.class); - ServletContainer sc = new ServletContainer(rc); - ServletHolder holder = new ServletHolder(sc); + final ServletContainer sc = new ServletContainer(rc); + final ServletHolder holder = new ServletHolder(sc); context.addServlet(holder, "/*"); jettyServer.start(); diff --git a/src/main/java/io/confluent/examples/streams/microservices/EmailService.java b/src/main/java/io/confluent/examples/streams/microservices/EmailService.java index 91ab0b35e5..177241ec26 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/EmailService.java +++ b/src/main/java/io/confluent/examples/streams/microservices/EmailService.java @@ -38,12 +38,12 @@ public class EmailService implements Service { private Joined serdes4 = Joined .with(ORDERS.keySerde(), ORDERS.valueSerde(), PAYMENTS.valueSerde()); - public EmailService(Emailer emailer) { + public EmailService(final Emailer emailer) { this.emailer = emailer; } @Override - public void start(String bootstrapServers) { + public void start(final String bootstrapServers) { streams = processStreams(bootstrapServers, "/tmp/kafka-streams"); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); @@ -52,12 +52,13 @@ public void start(String bootstrapServers) { private KafkaStreams processStreams(final String bootstrapServers, final String stateDir) { - KStreamBuilder builder = new KStreamBuilder(); + final KStreamBuilder builder = new KStreamBuilder(); //Create the streams/tables for the join - KStream orders = builder.stream(ORDERS.keySerde(), ORDERS.valueSerde(), ORDERS.name()); + final KStream orders = 
builder.stream(ORDERS.keySerde(), ORDERS.valueSerde(), ORDERS.name()); KStream payments = builder.stream(PAYMENTS.keySerde(), PAYMENTS.valueSerde(), PAYMENTS.name()); - GlobalKTable customers = builder.globalTable(CUSTOMERS.keySerde(), CUSTOMERS.valueSerde(), CUSTOMERS.name()); + final GlobalKTable customers = + builder.globalTable(CUSTOMERS.keySerde(), CUSTOMERS.valueSerde(), CUSTOMERS.name()); //Rekey payments to be by OrderId for the windowed join payments = payments.selectKey((s, payment) -> payment.getOrderId()); @@ -79,8 +80,8 @@ private KafkaStreams processStreams(final String bootstrapServers, final String return new KafkaStreams(builder, baseStreamsConfig(bootstrapServers, stateDir, APP_ID)); } - public static void main(String[] args) throws Exception { - EmailService service = new EmailService(new LoggingEmailer()); + public static void main(final String[] args) throws Exception { + final EmailService service = new EmailService(new LoggingEmailer()); service.start(parseArgsAndConfigure(args)); addShutdownHookAndBlock(service); } @@ -88,7 +89,7 @@ public static void main(String[] args) throws Exception { private static class LoggingEmailer implements Emailer { @Override - public void sendEmail(EmailTuple details) { + public void sendEmail(final EmailTuple details) { //In a real implementation we would do something a little more useful log.warn("Sending an email to: \nCustomer:%s\nOrder:%s\nPayment%s", details.customer, details.order, details.payment); @@ -112,12 +113,12 @@ public class EmailTuple { public Payment payment; public Customer customer; - public EmailTuple(Order order, Payment payment) { + public EmailTuple(final Order order, final Payment payment) { this.order = order; this.payment = payment; } - EmailTuple setCustomer(Customer customer) { + EmailTuple setCustomer(final Customer customer) { this.customer = customer; return this; } diff --git a/src/main/java/io/confluent/examples/streams/microservices/FraudService.java b/src/main/java/io/confluent/examples/streams/microservices/FraudService.java index 6cb7a8e813..0bf2ced478 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/FraudService.java +++ b/src/main/java/io/confluent/examples/streams/microservices/FraudService.java @@ -47,7 +47,7 @@ public class FraudService implements Service { private KafkaStreams streams; @Override - public void start(String bootstrapServers) { + public void start(final String bootstrapServers) { streams = processStreams(bootstrapServers, "/tmp/kafka-streams"); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); @@ -57,14 +57,14 @@ public void start(String bootstrapServers) { private KafkaStreams processStreams(final String bootstrapServers, final String stateDir) { //Latch onto instances of the orders and inventory topics - StreamsBuilder builder = new StreamsBuilder(); - KStream orders = builder + final StreamsBuilder builder = new StreamsBuilder(); + final KStream orders = builder .stream(ORDERS.name(), Consumed.with(ORDERS.keySerde(), ORDERS.valueSerde())) .filter((id, order) -> OrderState.CREATED.equals(order.getState())); //Create an aggregate of the total value by customer and hold it with the order. We use session windows to // detect periods of activity. 
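Note: EmailService enriches the order/payment pair with customer details by joining against a GlobalKTable; the key mapper extracts the customer id from the stream record, so the stream does not need to be repartitioned. A sketch against the StreamsBuilder API, with the Avro Order/Customer/EmailTuple types replaced by stand-ins:

```java
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.Consumed;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.GlobalKTable;
import org.apache.kafka.streams.kstream.KStream;

// Types are placeholders: String order ids, Long customer ids, and String values stand in
// for the Avro classes used by the service.
final class GlobalTableJoinSketch {
  static void build(final Serde<String> valueSerde) {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> orders =
        builder.stream("orders", Consumed.with(Serdes.String(), valueSerde));
    final GlobalKTable<Long, String> customers =
        builder.globalTable("customers", Consumed.with(Serdes.Long(), valueSerde));

    // keyMapper picks the customer id out of the order; joiner attaches the customer record.
    final KStream<String, String> enriched = orders.join(
        customers,
        (orderId, order) -> 1L,                        // stand-in for order.getCustomerId()
        (order, customer) -> order + "/" + customer);  // stand-in for EmailTuple::setCustomer
  }
}
```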
- KTable, OrderValue> aggregate = orders + final KTable, OrderValue> aggregate = orders .groupBy((id, order) -> order.getCustomerId(), Serialized.with(Serdes.Long(), ORDERS.valueSerde())) .windowedBy(SessionWindows.with(60 * MIN)) .aggregate(OrderValue::new, @@ -75,13 +75,13 @@ private KafkaStreams processStreams(final String bootstrapServers, final String Materialized.with(null, Schemas.ORDER_VALUE_SERDE)); //Ditch the windowing and rekey - KStream ordersWithTotals = aggregate + final KStream ordersWithTotals = aggregate .toStream((windowedKey, orderValue) -> windowedKey.key()) .filter((k, v) -> v != null)//When elements are evicted from a session window they create delete events. Filter these out. .selectKey((id, orderValue) -> orderValue.getOrder().getId()); //Now branch the stream into two, for pass and fail, based on whether the windowed total is over Fraud Limit - KStream[] forks = ordersWithTotals.branch( + final KStream[] forks = ordersWithTotals.branch( (id, orderValue) -> orderValue.getValue() >= FRAUD_LIMIT, (id, orderValue) -> orderValue.getValue() < FRAUD_LIMIT); @@ -99,18 +99,18 @@ private KafkaStreams processStreams(final String bootstrapServers, final String //as caching in Kafka Streams will conflate subsequent updates for the same key. Disabling caching ensures //we get a complete "changelog" from the aggregate(...) step above (i.e. every input event will have a //corresponding output event. - Properties props = baseStreamsConfig(bootstrapServers, stateDir, FRAUD_SERVICE_APP_ID); + final Properties props = baseStreamsConfig(bootstrapServers, stateDir, FRAUD_SERVICE_APP_ID); props.setProperty(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0"); return new KafkaStreams(builder.build(), props); } - private OrderValue simpleMerge(OrderValue a, OrderValue b) { + private OrderValue simpleMerge(final OrderValue a, final OrderValue b) { return new OrderValue(b.getOrder(), (a == null ? 
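Note: FraudService then splits the re-keyed customer totals into a pass stream and a fail stream with branch(); both branches are mapped to OrderValidation results. Written out compactly, with Double standing in for the repo's OrderValue:

```java
import org.apache.kafka.streams.kstream.KStream;

final class FraudBranchSketch {
  private static final long FRAUD_LIMIT = 2000;

  // Splits customer totals into [0] = over the limit (FAIL) and [1] = under it (PASS).
  @SuppressWarnings("unchecked")
  static KStream<String, Double>[] split(final KStream<String, Double> ordersWithTotals) {
    return ordersWithTotals.branch(
        (id, total) -> total >= FRAUD_LIMIT,
        (id, total) -> total < FRAUD_LIMIT);
  }
}
```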
0D : a.getValue()) + b.getValue()); } - public static void main(String[] args) throws Exception { - FraudService service = new FraudService(); + public static void main(final String[] args) throws Exception { + final FraudService service = new FraudService(); service.start(parseArgsAndConfigure(args)); addShutdownHookAndBlock(service); } diff --git a/src/main/java/io/confluent/examples/streams/microservices/InventoryService.java b/src/main/java/io/confluent/examples/streams/microservices/InventoryService.java index 82e52fc9d8..a8d039d415 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/InventoryService.java +++ b/src/main/java/io/confluent/examples/streams/microservices/InventoryService.java @@ -48,7 +48,7 @@ public class InventoryService implements Service { private KafkaStreams streams; @Override - public void start(String bootstrapServers) { + public void start(final String bootstrapServers) { streams = processStreams(bootstrapServers, "/tmp/kafka-streams"); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); @@ -65,17 +65,17 @@ public void stop() { private KafkaStreams processStreams(final String bootstrapServers, final String stateDir) { //Latch onto instances of the orders and inventory topics - StreamsBuilder builder = new StreamsBuilder(); - KStream orders = builder + final StreamsBuilder builder = new StreamsBuilder(); + final KStream orders = builder .stream(Topics.ORDERS.name(), Consumed.with(Topics.ORDERS.keySerde(), Topics.ORDERS.valueSerde())); - KTable warehouseInventory = builder + final KTable warehouseInventory = builder .table(Topics.WAREHOUSE_INVENTORY.name(), Consumed .with(Topics.WAREHOUSE_INVENTORY.keySerde(), Topics.WAREHOUSE_INVENTORY.valueSerde())); //Create a store to reserve inventory whilst the order is processed. 
//This will be prepopulated from Kafka before the service starts processing - StoreBuilder reservedStock = Stores + final StoreBuilder reservedStock = Stores .keyValueStoreBuilder(Stores.persistentKeyValueStore(RESERVED_STOCK_STORE_NAME), Topics.WAREHOUSE_INVENTORY.keySerde(), Serdes.Long()) .withLoggingEnabled(new HashMap<>()); @@ -105,7 +105,7 @@ private static class InventoryValidator implements @Override @SuppressWarnings("unchecked") - public void init(ProcessorContext context) { + public void init(final ProcessorContext context) { reservedStocksStore = (KeyValueStore) context .getStateStore(RESERVED_STOCK_STORE_NAME); } @@ -114,9 +114,9 @@ public void init(ProcessorContext context) { public KeyValue transform(final Product productId, final KeyValue orderAndStock) { //Process each order/inventory pair one at a time - OrderValidation validated; - Order order = orderAndStock.key; - Integer warehouseStockCount = orderAndStock.value; + final OrderValidation validated; + final Order order = orderAndStock.key; + final Integer warehouseStockCount = orderAndStock.value; //Look up locally 'reserved' stock from our state store Long reserved = reservedStocksStore.get(order.getProduct()); @@ -138,7 +138,7 @@ public KeyValue transform(final Product productId, } @Override - public KeyValue punctuate(long timestamp) { + public KeyValue punctuate(final long timestamp) { return null; } @@ -147,8 +147,8 @@ public void close() { } } - public static void main(String[] args) throws Exception { - InventoryService service = new InventoryService(); + public static void main(final String[] args) throws Exception { + final InventoryService service = new InventoryService(); service.start(parseArgsAndConfigure(args)); addShutdownHookAndBlock(service); } diff --git a/src/main/java/io/confluent/examples/streams/microservices/OrderDetailsService.java b/src/main/java/io/confluent/examples/streams/microservices/OrderDetailsService.java index 2ec4979b42..cbb31deac6 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/OrderDetailsService.java +++ b/src/main/java/io/confluent/examples/streams/microservices/OrderDetailsService.java @@ -53,27 +53,27 @@ public class OrderDetailsService implements Service { private volatile boolean running; @Override - public void start(String bootstrapServers) { + public void start(final String bootstrapServers) { executorService.execute(() -> startService(bootstrapServers)); running = true; log.info("Started Service " + getClass().getSimpleName()); } - private void startService(String bootstrapServers) { + private void startService(final String bootstrapServers) { startConsumer(bootstrapServers); startProducer(bootstrapServers); try { - Map consumedOffsets = new HashMap<>(); + final Map consumedOffsets = new HashMap<>(); consumer.subscribe(singletonList(Topics.ORDERS.name())); producer.initTransactions(); while (running) { - ConsumerRecords records = consumer.poll(100); + final ConsumerRecords records = consumer.poll(100); if (records.count() > 0) { producer.beginTransaction(); - for (ConsumerRecord record : records) { - Order order = record.value(); + for (final ConsumerRecord record : records) { + final Order order = record.value(); if (OrderState.CREATED.equals(order.getState())) { //Validate the order then send the result (but note we are in a transaction so //nothing will be "seen" downstream until we commit the transaction below) @@ -90,14 +90,14 @@ private void startService(String bootstrapServers) { } } - private void recordOffset(Map consumedOffsets, - 
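Note: OrderDetailsService consumes orders and emits validation results inside a Kafka transaction. The offset-forwarding and commit calls at the end of this sketch are an assumption, since they fall outside the hunk shown above, but they are the standard way to complete a read-process-write transaction:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;

final class TransactionalLoopSketch {
  volatile boolean running = true;

  void run(final KafkaConsumer<String, String> consumer,
           final KafkaProducer<String, String> producer,
           final String outputTopic,
           final String consumerGroupId) {
    producer.initTransactions();
    while (running) {
      final ConsumerRecords<String, String> records = consumer.poll(100);
      if (records.count() > 0) {
        final Map<TopicPartition, OffsetAndMetadata> consumedOffsets = new HashMap<>();
        producer.beginTransaction();
        for (final ConsumerRecord<String, String> record : records) {
          // validate the order here, then publish the result inside the transaction
          producer.send(new ProducerRecord<>(outputTopic, record.key(), record.value()));
          consumedOffsets.put(new TopicPartition(record.topic(), record.partition()),
                              new OffsetAndMetadata(record.offset() + 1));
        }
        // Forward the consumed offsets and commit, so results and progress are atomic.
        producer.sendOffsetsToTransaction(consumedOffsets, consumerGroupId);
        producer.commitTransaction();
      }
    }
  }
}
```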
ConsumerRecord record) { - OffsetAndMetadata nextOffset = new OffsetAndMetadata(record.offset() + 1); + private void recordOffset(final Map consumedOffsets, + final ConsumerRecord record) { + final OffsetAndMetadata nextOffset = new OffsetAndMetadata(record.offset() + 1); consumedOffsets.put(new TopicPartition(record.topic(), record.partition()), nextOffset); } - private ProducerRecord result(Order order, - OrderValidationResult passOrFail) { + private ProducerRecord result(final Order order, + final OrderValidationResult passOrFail) { return new ProducerRecord<>( Topics.ORDER_VALIDATIONS.name(), order.getId(), @@ -105,8 +105,8 @@ private ProducerRecord result(Order order, ); } - private void startProducer(String bootstrapServers) { - Properties producerConfig = new Properties(); + private void startProducer(final String bootstrapServers) { + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); producerConfig.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "OrderDetailsServiceInstance1"); producerConfig.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); @@ -118,8 +118,8 @@ private void startProducer(String bootstrapServers) { Topics.ORDER_VALIDATIONS.valueSerde().serializer()); } - private void startConsumer(String bootstrapServers) { - Properties consumerConfig = new Properties(); + private void startConsumer(final String bootstrapServers) { + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, CONSUMER_GROUP_ID); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); @@ -144,13 +144,13 @@ public void stop() { running = false; try { executorService.awaitTermination(1000, TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { + } catch (final InterruptedException e) { log.info("Failed to stop " + getClass().getSimpleName() + " in 1000ms"); } log.info(getClass().getSimpleName() + " was stopped"); } - private boolean isValid(Order order) { + private boolean isValid(final Order order) { if (order.getCustomerId() == null) { return false; } @@ -163,8 +163,8 @@ private boolean isValid(Order order) { return order.getProduct() != null; } - public static void main(String[] args) throws Exception { - OrderDetailsService service = new OrderDetailsService(); + public static void main(final String[] args) throws Exception { + final OrderDetailsService service = new OrderDetailsService(); service.start(MicroserviceUtils.parseArgsAndConfigure(args)); addShutdownHookAndBlock(service); } diff --git a/src/main/java/io/confluent/examples/streams/microservices/OrdersService.java b/src/main/java/io/confluent/examples/streams/microservices/OrdersService.java index c31f9ffb23..7fd0399bec 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/OrdersService.java +++ b/src/main/java/io/confluent/examples/streams/microservices/OrdersService.java @@ -111,12 +111,12 @@ public class OrdersService implements Service { // different users and (b) periodically purge old entries from this map. private Map> outstandingRequests = new ConcurrentHashMap<>(); - public OrdersService(String host, int port) { + public OrdersService(final String host, final int port) { this.host = host; this.port = port; } - public OrdersService(String host) { + public OrdersService(final String host) { this(host, 0); } @@ -126,7 +126,7 @@ public OrdersService(String host) { * fulfilled. 
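Note: the producer behind that loop is created with a fixed transactional id and idempotence enabled; in the service the key/value serializers come from the Topics definition in Schemas. A sketch with String serdes standing in for the Avro serdes:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.Serdes;

final class TransactionalProducerSketch {
  static KafkaProducer<String, String> create(final String bootstrapServers) {
    final Properties producerConfig = new Properties();
    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    // One transactional id per logical producer instance; enables exactly-once writes.
    producerConfig.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "OrderDetailsServiceInstance1");
    producerConfig.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
    return new KafkaProducer<>(producerConfig,
        Serdes.String().serializer(),     // the service uses Topics.ORDER_VALIDATIONS serdes
        Serdes.String().serializer());
  }
}
```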
*/ private KStreamBuilder createOrdersMaterializedView() { - KStreamBuilder builder = new KStreamBuilder(); + final KStreamBuilder builder = new KStreamBuilder(); builder.stream(ORDERS.keySerde(), ORDERS.valueSerde(), ORDERS.name()) .groupByKey(ORDERS.keySerde(), ORDERS.valueSerde()) .reduce((agg, newVal) -> newVal, ORDERS_STORE_NAME) @@ -134,8 +134,8 @@ private KStreamBuilder createOrdersMaterializedView() { return builder; } - private void maybeCompleteLongPollGet(String id, Order order) { - FilteredResponse callback = outstandingRequests.get(id); + private void maybeCompleteLongPollGet(final String id, final Order order) { + final FilteredResponse callback = outstandingRequests.get(id); if (callback != null && callback.predicate.test(id, order)) { callback.asyncResponse.resume(toBean(order)); } @@ -157,11 +157,11 @@ private void maybeCompleteLongPollGet(String id, Order order) { @Path("/orders/{id}") @Produces({MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN}) public void getWithTimeout(@PathParam("id") final String id, - @QueryParam("timeout") @DefaultValue(CALL_TIMEOUT) Long timeout, + @QueryParam("timeout") @DefaultValue(CALL_TIMEOUT) final Long timeout, @Suspended final AsyncResponse asyncResponse) { setTimeout(timeout, asyncResponse); - HostStoreInfo hostForKey = getKeyLocationOrBlock(id, asyncResponse); + final HostStoreInfo hostForKey = getKeyLocationOrBlock(id, asyncResponse); if (hostForKey == null) { //request timed out so return return; @@ -170,7 +170,7 @@ public void getWithTimeout(@PathParam("id") final String id, if (thisHost(hostForKey)) { fetchLocal(id, asyncResponse, (k, v) -> true); } else { - String path = new Paths(hostForKey.getHost(), hostForKey.getPort()).urlGet(id); + final String path = new Paths(hostForKey.getHost(), hostForKey.getPort()).urlGet(id); fetchFromOtherHost(path, asyncResponse, timeout); } } @@ -179,7 +179,7 @@ class FilteredResponse { private AsyncResponse asyncResponse; private Predicate predicate; - FilteredResponse(AsyncResponse asyncResponse, Predicate predicate) { + FilteredResponse(final AsyncResponse asyncResponse, final Predicate predicate) { this.asyncResponse = asyncResponse; this.predicate = predicate; } @@ -193,17 +193,19 @@ class FilteredResponse { * @param predicate a filter that for this fetch, so for example we might fetch only VALIDATED * orders. */ - private void fetchLocal(String id, AsyncResponse asyncResponse, Predicate predicate) { + private void fetchLocal(final String id, + final AsyncResponse asyncResponse, + final Predicate predicate) { log.info("running GET on this node"); try { - Order order = ordersStore().get(id); + final Order order = ordersStore().get(id); if (order == null || !predicate.test(id, order)) { log.info("Delaying get as order not present for id " + id); outstandingRequests.put(id, new FilteredResponse<>(asyncResponse, predicate)); } else { asyncResponse.resume(toBean(order)); } - } catch (InvalidStateStoreException e) { + } catch (final InvalidStateStoreException e) { //Store not ready so delay outstandingRequests.put(id, new FilteredResponse<>(asyncResponse, predicate)); } @@ -220,7 +222,7 @@ private ReadOnlyKeyValueStore ordersStore() { *
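Note: OrdersService answers GETs with a long poll: if the requested order is not yet in the materialized view, or the store is not queryable during a rebalance, the AsyncResponse is parked in outstandingRequests and resumed later when the view is updated. A trimmed sketch of both halves; the real code also carries a Predicate filter and a bean conversion:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.ws.rs.container.AsyncResponse;
import org.apache.kafka.streams.errors.InvalidStateStoreException;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

final class LongPollSketch {
  private final Map<String, AsyncResponse> outstandingRequests = new ConcurrentHashMap<>();

  void fetchLocal(final String id, final AsyncResponse asyncResponse,
                  final ReadOnlyKeyValueStore<String, Object> ordersStore) {
    try {
      final Object order = ordersStore.get(id);
      if (order == null) {
        outstandingRequests.put(id, asyncResponse);    // not there yet: wait for an update
      } else {
        asyncResponse.resume(order);
      }
    } catch (final InvalidStateStoreException e) {
      outstandingRequests.put(id, asyncResponse);      // store not ready (e.g. during rebalance)
    }
  }

  // Called from the topology whenever the materialized view is updated for a key.
  void maybeCompleteLongPollGet(final String id, final Object order) {
    final AsyncResponse waiting = outstandingRequests.remove(id);
    if (waiting != null) {
      waiting.resume(order);
    }
  }
}
```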

* If metadata is available, which can happen on startup, or during a rebalance, block until it is. */ - private HostStoreInfo getKeyLocationOrBlock(String id, AsyncResponse asyncResponse) { + private HostStoreInfo getKeyLocationOrBlock(final String id, final AsyncResponse asyncResponse) { HostStoreInfo locationOfKey; while (locationMetadataIsUnavailable(locationOfKey = getHostForOrderId(id))) { //The metastore is not available. This can happen on startup/rebalance. @@ -231,14 +233,14 @@ private HostStoreInfo getKeyLocationOrBlock(String id, AsyncResponse asyncRespon try { //Sleep a bit until metadata becomes available Thread.sleep(Math.min(Long.valueOf(CALL_TIMEOUT), 200)); - } catch (InterruptedException e) { + } catch (final InterruptedException e) { e.printStackTrace(); } } return locationOfKey; } - private boolean locationMetadataIsUnavailable(HostStoreInfo hostWithKey) { + private boolean locationMetadataIsUnavailable(final HostStoreInfo hostWithKey) { return NOT_AVAILABLE.host().equals(hostWithKey.getHost()) && NOT_AVAILABLE.port() == hostWithKey.getPort(); } @@ -248,16 +250,16 @@ private boolean thisHost(final HostStoreInfo host) { host.getPort() == port; } - private void fetchFromOtherHost(final String path, AsyncResponse asyncResponse, long timeout) { + private void fetchFromOtherHost(final String path, final AsyncResponse asyncResponse, final long timeout) { log.info("Chaining GET to a different instance: " + path); try { - OrderBean bean = client.target(path) + final OrderBean bean = client.target(path) .queryParam("timeout", timeout) .request(MediaType.APPLICATION_JSON_TYPE) .get(new GenericType() { }); asyncResponse.resume(bean); - } catch (Exception swallowed) { + } catch (final Exception swallowed) { } } @@ -265,11 +267,11 @@ private void fetchFromOtherHost(final String path, AsyncResponse asyncResponse, @ManagedAsync @Path("orders/{id}/validated") public void getPostValidationWithTimeout(@PathParam("id") final String id, - @QueryParam("timeout") @DefaultValue(CALL_TIMEOUT) Long timeout, + @QueryParam("timeout") @DefaultValue(CALL_TIMEOUT) final Long timeout, @Suspended final AsyncResponse asyncResponse) { setTimeout(timeout, asyncResponse); - HostStoreInfo hostForKey = getKeyLocationOrBlock(id, asyncResponse); + final HostStoreInfo hostForKey = getKeyLocationOrBlock(id, asyncResponse); if (hostForKey == null) { //request timed out so return return; @@ -301,13 +303,13 @@ public void submitOrder(final OrderBean order, @Suspended final AsyncResponse response) { setTimeout(timeout, response); - Order bean = fromBean(order); + final Order bean = fromBean(order); producer.send(new ProducerRecord<>(ORDERS.name(), bean.getId(), bean), callback(response, bean.getId())); } @Override - public void start(String bootstrapServers) { + public void start(final String bootstrapServers) { jettyServer = startJetty(port, this); port = jettyServer.getURI().getPort(); // update port, in case port was zero producer = startProducer(bootstrapServers, ORDER_VALIDATIONS); @@ -315,8 +317,8 @@ public void start(String bootstrapServers) { log.info("Started Service " + getClass().getSimpleName()); } - private KafkaStreams startKStreams(String bootstrapServers) { - KafkaStreams streams = new KafkaStreams(createOrdersMaterializedView(), + private KafkaStreams startKStreams(final String bootstrapServers) { + final KafkaStreams streams = new KafkaStreams(createOrdersMaterializedView(), config(bootstrapServers)); metadataService = new MetadataService(streams); streams.cleanUp(); //don't do this in prod as it 
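Note: when the key lives on another instance, the request is chained to that host with the JAX-RS client; this mirrors the fetchFromOtherHost shown above, with the stripped GenericType parameter restored as an assumption (OrderBean is the repo's response bean):

```java
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.container.AsyncResponse;
import javax.ws.rs.core.GenericType;
import javax.ws.rs.core.MediaType;
import org.glassfish.jersey.jackson.JacksonFeature;

final class RemoteFetchSketch {
  private final Client client = ClientBuilder.newBuilder().register(JacksonFeature.class).build();

  void fetchFromOtherHost(final String path, final AsyncResponse asyncResponse, final long timeout) {
    try {
      // e.g. path = "http://otherhost:port/v1/orders/{id}"; propagate the remaining timeout.
      final Object bean = client.target(path)
          .queryParam("timeout", timeout)
          .request(MediaType.APPLICATION_JSON_TYPE)
          .get(new GenericType<Object>() { });   // OrderBean in the real service
      asyncResponse.resume(bean);
    } catch (final Exception swallowed) {
      // the caller's timeout handler will answer if this hop fails
    }
  }
}
```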
clears your state stores @@ -324,8 +326,8 @@ private KafkaStreams startKStreams(String bootstrapServers) { return streams; } - private Properties config(String bootstrapServers) { - Properties props = baseStreamsConfig(bootstrapServers, "/tmp/kafka-streams", SERVICE_APP_ID); + private Properties config(final String bootstrapServers) { + final Properties props = baseStreamsConfig(bootstrapServers, "/tmp/kafka-streams", SERVICE_APP_ID); props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, host + ":" + port); return props; } @@ -341,7 +343,7 @@ public void stop() { if (jettyServer != null) { try { jettyServer.stop(); - } catch (Exception e) { + } catch (final Exception e) { e.printStackTrace(); } } @@ -358,7 +360,7 @@ public int port() { return port; } - private HostStoreInfo getHostForOrderId(String orderId) { + private HostStoreInfo getHostForOrderId(final String orderId) { return metadataService .streamsMetadataForStoreAndKey(ORDERS_STORE_NAME, orderId, Serdes.String().serializer()); } @@ -370,16 +372,16 @@ private Callback callback(final AsyncResponse response, final String orderId) { } else { try { //Return the location of the newly created resource - Response uri = Response.created(new URI("/v1/orders/" + orderId)).build(); + final Response uri = Response.created(new URI("/v1/orders/" + orderId)).build(); response.resume(uri); - } catch (URISyntaxException e2) { + } catch (final URISyntaxException e2) { e2.printStackTrace(); } } }; } - public static void main(String[] args) throws Exception { + public static void main(final String[] args) throws Exception { final String bootstrapServers = args.length > 1 ? args[1] : "localhost:9092"; final String schemaRegistryUrl = args.length > 2 ? args[2] : "http://localhost:8081"; @@ -387,7 +389,7 @@ public static void main(String[] args) throws Exception { final String restPort = args.length > 4 ? args[4] : null; Schemas.configureSerdesWithSchemaRegistryUrl(schemaRegistryUrl); - OrdersService service = new OrdersService(restHostname, restPort == null ? 0 : Integer.valueOf(restPort)); + final OrdersService service = new OrdersService(restHostname, restPort == null ? 
0 : Integer.valueOf(restPort)); service.start(bootstrapServers); addShutdownHookAndBlock(service); } diff --git a/src/main/java/io/confluent/examples/streams/microservices/ValidationsAggregatorService.java b/src/main/java/io/confluent/examples/streams/microservices/ValidationsAggregatorService.java index 83ab8f3174..154c8b2738 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/ValidationsAggregatorService.java +++ b/src/main/java/io/confluent/examples/streams/microservices/ValidationsAggregatorService.java @@ -56,20 +56,20 @@ public class ValidationsAggregatorService implements Service { private KafkaStreams streams; @Override - public void start(String bootstrapServers) { + public void start(final String bootstrapServers) { streams = aggregateOrderValidations(bootstrapServers, "/tmp/kafka-streams"); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); log.info("Started Service " + getClass().getSimpleName()); } - private KafkaStreams aggregateOrderValidations(String bootstrapServers, String stateDir) { + private KafkaStreams aggregateOrderValidations(final String bootstrapServers, final String stateDir) { final int numberOfRules = 3; //TODO put into a KTable to make dynamically configurable - StreamsBuilder builder = new StreamsBuilder(); - KStream validations = builder + final StreamsBuilder builder = new StreamsBuilder(); + final KStream validations = builder .stream(ORDER_VALIDATIONS.name(), serdes1); - KStream orders = builder + final KStream orders = builder .stream(ORDERS.name(), serdes2) .filter((id, order) -> OrderState.CREATED.equals(order.getState())); @@ -120,8 +120,8 @@ public void stop() { } } - public static void main(String[] args) throws Exception { - ValidationsAggregatorService service = new ValidationsAggregatorService(); + public static void main(final String[] args) throws Exception { + final ValidationsAggregatorService service = new ValidationsAggregatorService(); service.start(parseArgsAndConfigure(args)); addShutdownHookAndBlock(service); } diff --git a/src/main/java/io/confluent/examples/streams/microservices/domain/Schemas.java b/src/main/java/io/confluent/examples/streams/microservices/domain/Schemas.java index 3bc6c28ca4..891aedd48a 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/domain/Schemas.java +++ b/src/main/java/io/confluent/examples/streams/microservices/domain/Schemas.java @@ -27,11 +27,11 @@ public class Schemas { public static class Topic { - private String name; - private Serde keySerde; - private Serde valueSerde; + private final String name; + private final Serde keySerde; + private final Serde valueSerde; - Topic(String name, Serde keySerde, Serde valueSerde) { + Topic(final String name, final Serde keySerde, final Serde valueSerde) { this.name = name; this.keySerde = keySerde; this.valueSerde = valueSerde; @@ -80,9 +80,9 @@ private static void createTopics() { } } - public static void configureSerdesWithSchemaRegistryUrl(String url) { + public static void configureSerdesWithSchemaRegistryUrl(final String url) { Topics.createTopics(); //wipe cached schema registry - for (Topic topic : Topics.ALL.values()) { + for (final Topic topic : Topics.ALL.values()) { configure(topic.keySerde(), url); configure(topic.valueSerde(), url); } @@ -90,7 +90,7 @@ public static void configureSerdesWithSchemaRegistryUrl(String url) { schemaRegistryUrl = url; } - private static void configure(Serde serde, String url) { + private static void configure(final Serde serde, final String url) 
{ if (serde instanceof SpecificAvroSerde) { serde.configure(Collections.singletonMap(SCHEMA_REGISTRY_URL_CONFIG, url), false); } diff --git a/src/main/java/io/confluent/examples/streams/microservices/domain/beans/OrderBean.java b/src/main/java/io/confluent/examples/streams/microservices/domain/beans/OrderBean.java index ef48732bcb..1f70bda23d 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/domain/beans/OrderBean.java +++ b/src/main/java/io/confluent/examples/streams/microservices/domain/beans/OrderBean.java @@ -20,8 +20,12 @@ public OrderBean() { } - public OrderBean(String id, long customerId, OrderState state, Product product, int quantity, - double price) { + public OrderBean(final String id, + final long customerId, + final OrderState state, + final Product product, + final int quantity, + final double price) { this.id = id; this.customerId = customerId; this.state = state; @@ -54,12 +58,12 @@ public double getPrice() { return price; } - public void setId(String id) { + public void setId(final String id) { this.id = id; } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -67,7 +71,7 @@ public boolean equals(Object o) { return false; } - OrderBean orderBean = (OrderBean) o; + final OrderBean orderBean = (OrderBean) o; if (this.customerId != orderBean.customerId) { return false; @@ -103,7 +107,7 @@ public String toString() { @Override public int hashCode() { int result; - long temp; + final long temp; result = this.id != null ? this.id.hashCode() : 0; result = 31 * result + (int) (this.customerId ^ this.customerId >>> 32); result = 31 * result + (this.state != null ? this.state.hashCode() : 0); @@ -114,7 +118,7 @@ public int hashCode() { return result; } - public static OrderBean toBean(Order order) { + public static OrderBean toBean(final Order order) { return new OrderBean(order.getId(), order.getCustomerId(), order.getState(), @@ -123,7 +127,7 @@ public static OrderBean toBean(Order order) { order.getPrice()); } - public static Order fromBean(OrderBean order) { + public static Order fromBean(final OrderBean order) { return new Order(order.getId(), order.getCustomerId(), order.getState(), diff --git a/src/main/java/io/confluent/examples/streams/microservices/domain/beans/OrderId.java b/src/main/java/io/confluent/examples/streams/microservices/domain/beans/OrderId.java index 2baaa698b6..e6acb1e755 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/domain/beans/OrderId.java +++ b/src/main/java/io/confluent/examples/streams/microservices/domain/beans/OrderId.java @@ -2,7 +2,7 @@ public class OrderId { - public static String id(long id) { + public static String id(final long id) { return String.valueOf(id); } } \ No newline at end of file diff --git a/src/main/java/io/confluent/examples/streams/microservices/util/MicroserviceUtils.java b/src/main/java/io/confluent/examples/streams/microservices/util/MicroserviceUtils.java index 73a6747029..ecaf79c7c3 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/util/MicroserviceUtils.java +++ b/src/main/java/io/confluent/examples/streams/microservices/util/MicroserviceUtils.java @@ -34,7 +34,7 @@ public class MicroserviceUtils { private static final String DEFAULT_SCHEMA_REGISTRY_URL = "http://localhost:8081"; public static final long MIN = 60 * 1000L; - public static String parseArgsAndConfigure(String[] args) { + public static String parseArgsAndConfigure(final String[] args) { if (args.length > 2) { throw new 
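Note: Schemas.configure() wires every SpecificAvroSerde to the Schema Registry at startup. The essential call, assuming the URL constant comes from Confluent's AbstractKafkaAvroSerDeConfig (the import is not visible in this hunk):

```java
import java.util.Collections;
import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
import org.apache.avro.specific.SpecificRecord;

final class AvroSerdeConfigSketch {
  static <T extends SpecificRecord> SpecificAvroSerde<T> configure(final String schemaRegistryUrl) {
    final SpecificAvroSerde<T> serde = new SpecificAvroSerde<>();
    serde.configure(
        Collections.singletonMap(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG,
                                 schemaRegistryUrl),
        false);  // false = this serde is used for record values, not keys
    return serde;
  }
}
```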
IllegalArgumentException("usage: ... " + "[ (optional, default: " + DEFAULT_BOOTSTRAP_SERVERS + ")] " + @@ -49,9 +49,10 @@ public static String parseArgsAndConfigure(String[] args) { return bootstrapServers; } - public static Properties baseStreamsConfig(String bootstrapServers, String stateDir, - String appId) { - Properties config = new Properties(); + public static Properties baseStreamsConfig(final String bootstrapServers, + final String stateDir, + final String appId) { + final Properties config = new Properties(); // Workaround for a known issue with RocksDB in environments where you have only 1 cpu core. config.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, CustomRocksDBConfig.class); config.put(StreamsConfig.APPLICATION_ID_CONFIG, appId); @@ -72,7 +73,7 @@ public void setConfig(final String storeName, final Options options, // Workaround: We must ensure that the parallelism is set to >= 2. There seems to be a known // issue with RocksDB where explicitly setting the parallelism to 1 causes issues (even though // 1 seems to be RocksDB's default for this configuration). - int compactionParallelism = Math.max(Runtime.getRuntime().availableProcessors(), 2); + final int compactionParallelism = Math.max(Runtime.getRuntime().availableProcessors(), 2); // Set number of compaction threads (but not flush threads). options.setIncreaseParallelism(compactionParallelism); } @@ -82,7 +83,7 @@ public void setConfig(final String storeName, final Options options, public static final class ProductTypeSerde implements Serde { @Override - public void configure(Map map, boolean b) { + public void configure(final Map map, final boolean b) { } @Override @@ -93,11 +94,11 @@ public void close() { public Serializer serializer() { return new Serializer() { @Override - public void configure(Map map, boolean b) { + public void configure(final Map map, final boolean b) { } @Override - public byte[] serialize(String topic, Product pt) { + public byte[] serialize(final String topic, final Product pt) { return pt.toString().getBytes(); } @@ -111,11 +112,11 @@ public void close() { public Deserializer deserializer() { return new Deserializer() { @Override - public void configure(Map map, boolean b) { + public void configure(final Map map, final boolean b) { } @Override - public Product deserialize(String topic, byte[] bytes) { + public Product deserialize(final String topic, final byte[] bytes) { return Product.valueOf(new String(bytes)); } @@ -126,7 +127,7 @@ public void close() { } } - public static void setTimeout(long timeout, AsyncResponse asyncResponse) { + public static void setTimeout(final long timeout, final AsyncResponse asyncResponse) { asyncResponse.setTimeout(timeout, TimeUnit.MILLISECONDS); asyncResponse.setTimeoutHandler(resp -> resp.resume( Response.status(Response.Status.GATEWAY_TIMEOUT) @@ -134,33 +135,33 @@ public static void setTimeout(long timeout, AsyncResponse asyncResponse) { .build())); } - public static Server startJetty(int port, Object binding) { - ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); + public static Server startJetty(final int port, final Object binding) { + final ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); context.setContextPath("/"); - Server jettyServer = new Server(port); + final Server jettyServer = new Server(port); jettyServer.setHandler(context); - ResourceConfig rc = new ResourceConfig(); + final ResourceConfig rc = new ResourceConfig(); rc.register(binding); 
rc.register(JacksonFeature.class); - ServletContainer sc = new ServletContainer(rc); - ServletHolder holder = new ServletHolder(sc); + final ServletContainer sc = new ServletContainer(rc); + final ServletHolder holder = new ServletHolder(sc); context.addServlet(holder, "/*"); try { jettyServer.start(); - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException(e); } log.info("Listening on " + jettyServer.getURI()); return jettyServer; } - public static KafkaProducer startProducer(String bootstrapServers, - Schemas.Topic topic) { - Properties producerConfig = new Properties(); + public static KafkaProducer startProducer(final String bootstrapServers, + final Schemas.Topic topic) { + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); producerConfig.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, String.valueOf(Integer.MAX_VALUE)); @@ -171,12 +172,12 @@ public static KafkaProducer startProducer(String bootstrapServers, topic.valueSerde().serializer()); } - public static void addShutdownHookAndBlock(Service service) throws InterruptedException { + public static void addShutdownHookAndBlock(final Service service) throws InterruptedException { Thread.currentThread().setUncaughtExceptionHandler((t, e) -> service.stop()); Runtime.getRuntime().addShutdownHook(new Thread(() -> { try { service.stop(); - } catch (Exception ignored) { + } catch (final Exception ignored) { } })); Thread.currentThread().join(); diff --git a/src/main/java/io/confluent/examples/streams/microservices/util/Paths.java b/src/main/java/io/confluent/examples/streams/microservices/util/Paths.java index f51ffb6788..cf93e49fc1 100644 --- a/src/main/java/io/confluent/examples/streams/microservices/util/Paths.java +++ b/src/main/java/io/confluent/examples/streams/microservices/util/Paths.java @@ -2,25 +2,25 @@ public class Paths { - private String base; + private final String base; - public Paths(String host, int port) { + public Paths(final String host, final int port) { base = "http://" + host + ":" + port; } - public String urlGet(int id) { + public String urlGet(final int id) { return base + "/v1/orders/" + id; } - public String urlGet(String id) { + public String urlGet(final String id) { return base + "/v1/orders/" + id; } - public String urlGetValidated(int id) { + public String urlGetValidated(final int id) { return base + "/v1/orders/" + id + "/validated"; } - public String urlGetValidated(String id) { + public String urlGetValidated(final String id) { return base + "/v1/orders/" + id + "/validated"; } diff --git a/src/main/java/io/confluent/examples/streams/utils/PriorityQueueDeserializer.java b/src/main/java/io/confluent/examples/streams/utils/PriorityQueueDeserializer.java index 1581cb1bc0..20c0bed21c 100644 --- a/src/main/java/io/confluent/examples/streams/utils/PriorityQueueDeserializer.java +++ b/src/main/java/io/confluent/examples/streams/utils/PriorityQueueDeserializer.java @@ -35,12 +35,12 @@ public PriorityQueueDeserializer(final Comparator comparator, final Deseriali } @Override - public void configure(Map configs, boolean isKey) { + public void configure(final Map configs, final boolean isKey) { // do nothing } @Override - public PriorityQueue deserialize(String s, byte[] bytes) { + public PriorityQueue deserialize(final String s, final byte[] bytes) { if (bytes == null || bytes.length == 0) { return null; } @@ -53,7 +53,7 @@ public PriorityQueue 
deserialize(String s, byte[] bytes) { dataInputStream.read(valueBytes); priorityQueue.add(valueDeserializer.deserialize(s, valueBytes)); } - } catch (IOException e) { + } catch (final IOException e) { throw new RuntimeException("Unable to deserialize PriorityQueue", e); } return priorityQueue; diff --git a/src/main/java/io/confluent/examples/streams/utils/PriorityQueueSerde.java b/src/main/java/io/confluent/examples/streams/utils/PriorityQueueSerde.java index a9fe262c40..5d2a12f34a 100644 --- a/src/main/java/io/confluent/examples/streams/utils/PriorityQueueSerde.java +++ b/src/main/java/io/confluent/examples/streams/utils/PriorityQueueSerde.java @@ -44,7 +44,7 @@ public Deserializer> deserializer() { } @Override - public void configure(Map configs, boolean isKey) { + public void configure(final Map configs, final boolean isKey) { inner.serializer().configure(configs, isKey); inner.deserializer().configure(configs, isKey); } diff --git a/src/main/java/io/confluent/examples/streams/utils/PriorityQueueSerializer.java b/src/main/java/io/confluent/examples/streams/utils/PriorityQueueSerializer.java index f19174fc61..3d543b5b9e 100644 --- a/src/main/java/io/confluent/examples/streams/utils/PriorityQueueSerializer.java +++ b/src/main/java/io/confluent/examples/streams/utils/PriorityQueueSerializer.java @@ -35,12 +35,12 @@ public PriorityQueueSerializer(final Comparator comparator, final Serializer< this.valueSerializer = valueSerializer; } @Override - public void configure(Map configs, boolean isKey) { + public void configure(final Map configs, final boolean isKey) { // do nothing } @Override - public byte[] serialize(String topic, PriorityQueue queue) { + public byte[] serialize(final String topic, final PriorityQueue queue) { final int size = queue.size(); final ByteArrayOutputStream baos = new ByteArrayOutputStream(); final DataOutputStream out = new DataOutputStream(baos); @@ -53,7 +53,7 @@ public byte[] serialize(String topic, PriorityQueue queue) { out.write(bytes); } out.close(); - } catch (IOException e) { + } catch (final IOException e) { throw new RuntimeException("unable to serialize PriorityQueue", e); } return baos.toByteArray(); diff --git a/src/main/java/io/confluent/examples/streams/utils/WindowedSerde.java b/src/main/java/io/confluent/examples/streams/utils/WindowedSerde.java index ea7321152f..1ed1c358f5 100644 --- a/src/main/java/io/confluent/examples/streams/utils/WindowedSerde.java +++ b/src/main/java/io/confluent/examples/streams/utils/WindowedSerde.java @@ -29,7 +29,7 @@ public class WindowedSerde implements Serde> { private final Serde> inner; - public WindowedSerde(Serde serde) { + public WindowedSerde(final Serde serde) { inner = Serdes.serdeFrom( new WindowedSerializer<>(serde.serializer()), new WindowedDeserializer<>(serde.deserializer())); @@ -46,7 +46,7 @@ public Deserializer> deserializer() { } @Override - public void configure(Map configs, boolean isKey) { + public void configure(final Map configs, final boolean isKey) { inner.serializer().configure(configs, isKey); inner.deserializer().configure(configs, isKey); } diff --git a/src/test/java/io/confluent/examples/streams/EventDeduplicationLambdaIntegrationTest.java b/src/test/java/io/confluent/examples/streams/EventDeduplicationLambdaIntegrationTest.java index 9790168c94..346748efc6 100644 --- a/src/test/java/io/confluent/examples/streams/EventDeduplicationLambdaIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/EventDeduplicationLambdaIntegrationTest.java @@ -122,7 +122,7 @@ private static class 
DeduplicationTransformer implements Transformer idExtractor) { + DeduplicationTransformer(final long maintainDurationPerEventInMs, final KeyValueMapper idExtractor) { if (maintainDurationPerEventInMs < 1) { throw new IllegalArgumentException("maintain duration per event must be >= 1"); } @@ -139,11 +139,11 @@ public void init(final ProcessorContext context) { } public KeyValue transform(final K key, final V value) { - E eventId = idExtractor.apply(key, value); + final E eventId = idExtractor.apply(key, value); if (eventId == null) { return KeyValue.pair(key, value); } else { - KeyValue output; + final KeyValue output; if (isDuplicate(eventId)) { output = null; updateTimestampOfExistingEventToPreventExpiry(eventId, context.timestamp()); @@ -156,21 +156,21 @@ public KeyValue transform(final K key, final V value) { } private boolean isDuplicate(final E eventId) { - long eventTime = context.timestamp(); - WindowStoreIterator timeIterator = eventIdStore.fetch( + final long eventTime = context.timestamp(); + final WindowStoreIterator timeIterator = eventIdStore.fetch( eventId, eventTime - leftDurationMs, eventTime + rightDurationMs); - boolean isDuplicate = timeIterator.hasNext(); + final boolean isDuplicate = timeIterator.hasNext(); timeIterator.close(); return isDuplicate; } - private void updateTimestampOfExistingEventToPreventExpiry(final E eventId, long newTimestamp) { + private void updateTimestampOfExistingEventToPreventExpiry(final E eventId, final long newTimestamp) { eventIdStore.put(eventId, newTimestamp, newTimestamp); } - private void rememberNewEvent(final E eventId, long timestamp) { + private void rememberNewEvent(final E eventId, final long timestamp) { eventIdStore.put(eventId, timestamp, timestamp); } @@ -190,19 +190,19 @@ public void close() { @Test public void shouldRemoveDuplicatesFromTheInput() throws Exception { - String firstId = UUID.randomUUID().toString(); // e.g. "4ff3cb44-abcb-46e3-8f9a-afb7cc74fbb8" - String secondId = UUID.randomUUID().toString(); - String thirdId = UUID.randomUUID().toString(); - List inputValues = Arrays.asList(firstId, secondId, firstId, firstId, secondId, thirdId, + final String firstId = UUID.randomUUID().toString(); // e.g. "4ff3cb44-abcb-46e3-8f9a-afb7cc74fbb8" + final String secondId = UUID.randomUUID().toString(); + final String thirdId = UUID.randomUUID().toString(); + final List inputValues = Arrays.asList(firstId, secondId, firstId, firstId, secondId, thirdId, thirdId, firstId, secondId); - List expectedValues = Arrays.asList(firstId, secondId, thirdId); + final List expectedValues = Arrays.asList(firstId, secondId, thirdId); // // Step 1: Configure and start the processor topology. // - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "deduplication-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName()); @@ -220,18 +220,18 @@ public void shouldRemoveDuplicatesFromTheInput() throws Exception { // The actual value depends on your use case. To reduce memory and disk usage, you could // decrease the size to purge old windows more frequently at the cost of potentially missing out // on de-duplicating late-arriving records. 
- long maintainDurationPerEventInMs = TimeUnit.MINUTES.toMillis(10); + final long maintainDurationPerEventInMs = TimeUnit.MINUTES.toMillis(10); // The number of segments has no impact on "correctness". // Using more segments implies larger overhead but allows for more fined grained record expiration // Note: the specified retention time is a _minimum_ time span and no strict upper time bound - int numberOfSegments = 3; + final int numberOfSegments = 3; // retention period must be at least window size -- for this use case, we don't need a longer retention period // and thus just use the window size as retention time - long retentionPeriod = maintainDurationPerEventInMs; + final long retentionPeriod = maintainDurationPerEventInMs; - StoreBuilder> dedupStoreBuilder = Stores.windowStoreBuilder( + final StoreBuilder> dedupStoreBuilder = Stores.windowStoreBuilder( Stores.persistentWindowStore(storeName, retentionPeriod, numberOfSegments, @@ -244,8 +244,8 @@ public void shouldRemoveDuplicatesFromTheInput() throws Exception { builder.addStateStore(dedupStoreBuilder); - KStream input = builder.stream(inputTopic); - KStream deduplicated = input.transform( + final KStream input = builder.stream(inputTopic); + final KStream deduplicated = input.transform( // In this example, we assume that the record value as-is represents a unique event ID by // which we can perform de-duplication. If your records are different, adapt the extractor // function as needed. @@ -253,13 +253,13 @@ public void shouldRemoveDuplicatesFromTheInput() throws Exception { storeName); deduplicated.to(outputTopic); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. // - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -270,13 +270,13 @@ public void shouldRemoveDuplicatesFromTheInput() throws Exception { // // Step 3: Verify the application's output data. 
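The comments in the hunk above explain how the de-duplication window store is sized (maintain duration, number of segments, retention period). A compact sketch of that store definition follows, using the same Kafka Streams `Stores` API as the test; the store name and the concrete durations are illustrative.

```java
import java.util.concurrent.TimeUnit;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;
import org.apache.kafka.streams.state.WindowStore;

// Sketch of the window store backing the DeduplicationTransformer above.
public final class DedupStoreSketch {

  public static StoreBuilder<WindowStore<String, Long>> dedupStore() {
    final long maintainDurationMs = TimeUnit.MINUTES.toMillis(10); // how long an event id is remembered
    final int numberOfSegments = 3;                                // fewer segments = less overhead, coarser expiry
    final long retentionPeriodMs = maintainDurationMs;             // retention must be >= window size
    return Stores.windowStoreBuilder(
        Stores.persistentWindowStore("eventId-store", retentionPeriodMs, numberOfSegments,
            maintainDurationMs, false),
        Serdes.String(),
        Serdes.Long());
  }
}
```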
// - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "deduplication-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, + final List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, expectedValues.size()); streams.close(); assertThat(actualValues).containsExactlyElementsOf(expectedValues); diff --git a/src/test/java/io/confluent/examples/streams/FanoutLambdaIntegrationTest.java b/src/test/java/io/confluent/examples/streams/FanoutLambdaIntegrationTest.java index 6f38fc2b8f..c04b6db25b 100644 --- a/src/test/java/io/confluent/examples/streams/FanoutLambdaIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/FanoutLambdaIntegrationTest.java @@ -77,34 +77,34 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldFanoutTheInput() throws Exception { - List inputValues = Arrays.asList("Hello", "World"); - List expectedValuesForB = inputValues.stream().map(String::toUpperCase).collect(Collectors.toList()); - List expectedValuesForC = inputValues.stream().map(String::toLowerCase).collect(Collectors.toList()); + final List inputValues = Arrays.asList("Hello", "World"); + final List expectedValuesForB = inputValues.stream().map(String::toUpperCase).collect(Collectors.toList()); + final List expectedValuesForC = inputValues.stream().map(String::toLowerCase).collect(Collectors.toList()); // // Step 1: Configure and start the processor topology. // - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "fanout-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - KStream stream1 = builder.stream(inputTopicA); - KStream stream2 = stream1.mapValues(String::toUpperCase); - KStream stream3 = stream1.mapValues(String::toLowerCase); + final KStream stream1 = builder.stream(inputTopicA); + final KStream stream2 = stream1.mapValues(String::toUpperCase); + final KStream stream3 = stream1.mapValues(String::toLowerCase); stream2.to(outputTopicB); stream3.to(outputTopicC); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. 
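The fan-out topology exercised by `FanoutLambdaIntegrationTest` above can be summarised in a few lines: a single input stream feeds two independent downstream branches. The sketch below uses placeholder topic names.

```java
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;

// Sketch of the fan-out pattern: one source stream, two sinks.
public final class FanoutSketch {

  public static StreamsBuilder fanout() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<byte[], String> input = builder.stream("input-A");
    input.mapValues(String::toUpperCase).to("output-B"); // branch 1: uppercased copy
    input.mapValues(String::toLowerCase).to("output-C"); // branch 2: lowercased copy
    return builder;
  }
}
```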
// - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -117,24 +117,24 @@ public void shouldFanoutTheInput() throws Exception { // // Verify output topic B - Properties consumerConfigB = new Properties(); + final Properties consumerConfigB = new Properties(); consumerConfigB.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfigB.put(ConsumerConfig.GROUP_ID_CONFIG, "fanout-lambda-integration-test-standard-consumer-topicB"); consumerConfigB.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfigB.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); consumerConfigB.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - List actualValuesForB = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfigB, + final List actualValuesForB = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfigB, outputTopicB, inputValues.size()); assertThat(actualValuesForB).isEqualTo(expectedValuesForB); // Verify output topic C - Properties consumerConfigC = new Properties(); + final Properties consumerConfigC = new Properties(); consumerConfigC.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfigC.put(ConsumerConfig.GROUP_ID_CONFIG, "fanout-lambda-integration-test-standard-consumer-topicC"); consumerConfigC.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfigC.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); consumerConfigC.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - List actualValuesForC = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfigC, + final List actualValuesForC = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfigC, outputTopicC, inputValues.size()); streams.close(); assertThat(actualValuesForC).isEqualTo(expectedValuesForC); diff --git a/src/test/java/io/confluent/examples/streams/GenericAvroIntegrationTest.java b/src/test/java/io/confluent/examples/streams/GenericAvroIntegrationTest.java index 28838125aa..be777c6f50 100644 --- a/src/test/java/io/confluent/examples/streams/GenericAvroIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/GenericAvroIntegrationTest.java @@ -65,20 +65,20 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldRoundTripGenericAvroDataThroughKafka() throws Exception { - Schema schema = new Schema.Parser().parse( + final Schema schema = new Schema.Parser().parse( getClass().getResourceAsStream("/avro/io/confluent/examples/streams/wikifeed.avsc")); - GenericRecord record = new GenericData.Record(schema); + final GenericRecord record = new GenericData.Record(schema); record.put("user", "alice"); record.put("is_new", true); record.put("content", "lorem ipsum"); - List inputValues = Collections.singletonList(record); + final List inputValues = Collections.singletonList(record); // // Step 1: Configure and start the processor topology. 
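`GenericAvroIntegrationTest` above parses the wikifeed schema from the classpath and builds a `GenericRecord` by hand. A self-contained sketch of that construction, with the same resource path and arbitrary field values:

```java
import java.io.IOException;
import java.io.InputStream;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

// Sketch of building a generic Avro record against the wikifeed schema.
public final class WikifeedRecordSketch {

  public static GenericRecord sampleRecord() throws IOException {
    try (final InputStream in = WikifeedRecordSketch.class
        .getResourceAsStream("/avro/io/confluent/examples/streams/wikifeed.avsc")) {
      final Schema schema = new Schema.Parser().parse(in);
      final GenericRecord record = new GenericData.Record(schema);
      record.put("user", "alice");
      record.put("is_new", true);
      record.put("content", "lorem ipsum");
      return record;
    }
  }
}
```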
// - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "generic-avro-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName()); @@ -105,16 +105,16 @@ public void shouldRoundTripGenericAvroDataThroughKafka() throws Exception { genericAvroSerde.configure( Collections.singletonMap(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl()), isKeySerde); - KStream stream = builder.stream(inputTopic); + final KStream stream = builder.stream(inputTopic); stream.to(outputTopic, Produced.with(stringSerde, genericAvroSerde)); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. // - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -126,14 +126,14 @@ public void shouldRoundTripGenericAvroDataThroughKafka() throws Exception { // // Step 3: Verify the application's output data. // - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "generic-avro-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class); consumerConfig.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl()); - List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, + final List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, inputValues.size()); streams.close(); assertEquals(inputValues, actualValues); diff --git a/src/test/java/io/confluent/examples/streams/HandlingCorruptedInputRecordsIntegrationTest.java b/src/test/java/io/confluent/examples/streams/HandlingCorruptedInputRecordsIntegrationTest.java index dcfe2eb97c..8b2ac7efb2 100644 --- a/src/test/java/io/confluent/examples/streams/HandlingCorruptedInputRecordsIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/HandlingCorruptedInputRecordsIntegrationTest.java @@ -71,40 +71,40 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldIgnoreCorruptInputRecords() throws Exception { - List inputValues = Arrays.asList(1L, 2L, 3L); - List expectedValues = inputValues.stream().map(x -> 2 * x).collect(Collectors.toList()); + final List inputValues = Arrays.asList(1L, 2L, 3L); + final List expectedValues = inputValues.stream().map(x -> 2 * x).collect(Collectors.toList()); // // Step 1: Configure and start the processor topology. 
// - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "failure-handling-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName()); streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName()); streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - Serde stringSerde = Serdes.String(); - Serde longSerde = Serdes.Long(); + final Serde stringSerde = Serdes.String(); + final Serde longSerde = Serdes.Long(); - KStream input = builder.stream(inputTopic); + final KStream input = builder.stream(inputTopic); // Note how the returned stream is of type `KStream`. - KStream doubled = input.flatMap( + final KStream doubled = input.flatMap( (k, v) -> { try { // Attempt deserialization - String key = stringSerde.deserializer().deserialize("input-topic", k); - long value = longSerde.deserializer().deserialize("input-topic", v); + final String key = stringSerde.deserializer().deserialize("input-topic", k); + final long value = longSerde.deserializer().deserialize("input-topic", v); // Ok, the record is valid (not corrupted). Let's take the // opportunity to also process the record in some way so that // we haven't paid the deserialization cost just for "poison pill" // checking. return Collections.singletonList(KeyValue.pair(key, 2 * value)); - } catch (SerializationException e) { + } catch (final SerializationException e) { // Ignore/skip the corrupted record by catching the exception. // Optionally, we can log the fact that we did so: System.err.println("Could not deserialize record: " + e.getMessage()); @@ -116,13 +116,13 @@ public void shouldIgnoreCorruptInputRecords() throws Exception { // Write the processing results (which was generated from valid records only) to Kafka. doubled.to(outputTopic, Produced.with(stringSerde, longSerde)); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some corrupt input data to the input topic. // - Properties producerConfigForCorruptRecords = new Properties(); + final Properties producerConfigForCorruptRecords = new Properties(); producerConfigForCorruptRecords.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfigForCorruptRecords.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfigForCorruptRecords.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -134,7 +134,7 @@ public void shouldIgnoreCorruptInputRecords() throws Exception { // // Step 3: Produce some (valid) input data to the input topic. // - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -145,13 +145,13 @@ public void shouldIgnoreCorruptInputRecords() throws Exception { // // Step 4: Verify the application's output data. 
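The "poison pill" handling shown in `HandlingCorruptedInputRecordsIntegrationTest` above boils down to: read raw bytes, attempt deserialization inside `flatMap`, and emit nothing for records that cannot be parsed. A condensed sketch, with placeholder topic names:

```java
import java.util.Collections;

import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;

// Sketch of skipping corrupted records by catching SerializationException in flatMap.
public final class SkipCorruptRecordsSketch {

  public static void build(final StreamsBuilder builder) {
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();

    final KStream<byte[], byte[]> input = builder.stream("input-topic");
    final KStream<String, Long> doubled = input.flatMap((k, v) -> {
      try {
        // Attempt deserialization; if it succeeds, also do the real processing here.
        final String key = stringSerde.deserializer().deserialize("input-topic", k);
        final long value = longSerde.deserializer().deserialize("input-topic", v);
        return Collections.singletonList(KeyValue.pair(key, 2 * value));
      } catch (final SerializationException e) {
        // Corrupted record: skip it by emitting nothing (optionally log it).
        return Collections.emptyList();
      }
    });
    doubled.to("output-topic", Produced.with(stringSerde, longSerde));
  }
}
```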
// - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "map-function-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class); - List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, + final List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, expectedValues.size()); streams.close(); assertThat(actualValues).isEqualTo(expectedValues); diff --git a/src/test/java/io/confluent/examples/streams/IntegrationTestUtils.java b/src/test/java/io/confluent/examples/streams/IntegrationTestUtils.java index 0563249084..484f82a164 100644 --- a/src/test/java/io/confluent/examples/streams/IntegrationTestUtils.java +++ b/src/test/java/io/confluent/examples/streams/IntegrationTestUtils.java @@ -58,8 +58,10 @@ public class IntegrationTestUtils { * @param maxMessages Maximum number of messages to read via the consumer. * @return The values retrieved via the consumer. */ - public static List readValues(String topic, Properties consumerConfig, int maxMessages) { - List> kvs = readKeyValues(topic, consumerConfig, maxMessages); + public static List readValues(final String topic, + final Properties consumerConfig, + final int maxMessages) { + final List> kvs = readKeyValues(topic, consumerConfig, maxMessages); return kvs.stream().map(kv -> kv.value).collect(Collectors.toList()); } @@ -71,7 +73,8 @@ public static List readValues(String topic, Properties consumerConfig, * @param consumerConfig Kafka consumer configuration * @return The KeyValue elements retrieved via the consumer. 
*/ - public static List> readKeyValues(String topic, Properties consumerConfig) { + public static List> readKeyValues(final String topic, + final Properties consumerConfig) { return readKeyValues(topic, consumerConfig, UNLIMITED_MESSAGES); } @@ -84,17 +87,19 @@ public static List> readKeyValues(String topic, Properties * @param maxMessages Maximum number of messages to read via the consumer * @return The KeyValue elements retrieved via the consumer */ - public static List> readKeyValues(String topic, Properties consumerConfig, int maxMessages) { - KafkaConsumer consumer = new KafkaConsumer<>(consumerConfig); + public static List> readKeyValues(final String topic, + final Properties consumerConfig, + final int maxMessages) { + final KafkaConsumer consumer = new KafkaConsumer<>(consumerConfig); consumer.subscribe(Collections.singletonList(topic)); - int pollIntervalMs = 100; - int maxTotalPollTimeMs = 2000; + final int pollIntervalMs = 100; + final int maxTotalPollTimeMs = 2000; int totalPollTimeMs = 0; - List> consumedValues = new ArrayList<>(); + final List> consumedValues = new ArrayList<>(); while (totalPollTimeMs < maxTotalPollTimeMs && continueConsuming(consumedValues.size(), maxMessages)) { totalPollTimeMs += pollIntervalMs; - ConsumerRecords records = consumer.poll(pollIntervalMs); - for (ConsumerRecord record : records) { + final ConsumerRecords records = consumer.poll(pollIntervalMs); + for (final ConsumerRecord record : records) { consumedValues.add(new KeyValue<>(record.key(), record.value())); } } @@ -102,7 +107,7 @@ public static List> readKeyValues(String topic, Properties return consumedValues; } - private static boolean continueConsuming(int messagesConsumed, int maxMessages) { + private static boolean continueConsuming(final int messagesConsumed, final int maxMessages) { return maxMessages <= 0 || messagesConsumed < maxMessages; } @@ -114,11 +119,11 @@ private static boolean continueConsuming(int messagesConsumed, int maxMessages) * @param Value type of the data records */ public static void produceKeyValuesSynchronously( - String topic, Collection> records, Properties producerConfig) + final String topic, final Collection> records, final Properties producerConfig) throws ExecutionException, InterruptedException { - Producer producer = new KafkaProducer<>(producerConfig); - for (KeyValue record : records) { - Future f = producer.send( + final Producer producer = new KafkaProducer<>(producerConfig); + for (final KeyValue record : records) { + final Future f = producer.send( new ProducerRecord<>(topic, record.key, record.value)); f.get(); } @@ -127,16 +132,17 @@ public static void produceKeyValuesSynchronously( } public static void produceValuesSynchronously( - String topic, Collection records, Properties producerConfig) + final String topic, final Collection records, final Properties producerConfig) throws ExecutionException, InterruptedException { - Collection> keyedRecords = + final Collection> keyedRecords = records.stream().map(record -> new KeyValue<>(null, record)).collect(Collectors.toList()); produceKeyValuesSynchronously(topic, keyedRecords, producerConfig); } - public static List> waitUntilMinKeyValueRecordsReceived(Properties consumerConfig, - String topic, - int expectedNumRecords) throws InterruptedException { + public static List> waitUntilMinKeyValueRecordsReceived(final Properties consumerConfig, + final String topic, + final int expectedNumRecords) + throws InterruptedException { return waitUntilMinKeyValueRecordsReceived(consumerConfig, topic, 
expectedNumRecords, DEFAULT_TIMEOUT); } @@ -151,14 +157,15 @@ public static List> waitUntilMinKeyValueRecordsReceived(Pr * @return All the records consumed, or null if no records are consumed * @throws AssertionError if the given wait time elapses */ - public static List> waitUntilMinKeyValueRecordsReceived(Properties consumerConfig, - String topic, - int expectedNumRecords, - long waitTime) throws InterruptedException { - List> accumData = new ArrayList<>(); - long startTime = System.currentTimeMillis(); + public static List> waitUntilMinKeyValueRecordsReceived(final Properties consumerConfig, + final String topic, + final int expectedNumRecords, + final long waitTime) + throws InterruptedException { + final List> accumData = new ArrayList<>(); + final long startTime = System.currentTimeMillis(); while (true) { - List> readData = readKeyValues(topic, consumerConfig); + final List> readData = readKeyValues(topic, consumerConfig); accumData.addAll(readData); if (accumData.size() >= expectedNumRecords) return accumData; @@ -170,9 +177,10 @@ public static List> waitUntilMinKeyValueRecordsReceived(Pr } } - public static List waitUntilMinValuesRecordsReceived(Properties consumerConfig, - String topic, - int expectedNumRecords) throws InterruptedException { + public static List waitUntilMinValuesRecordsReceived(final Properties consumerConfig, + final String topic, + final int expectedNumRecords) + throws InterruptedException { return waitUntilMinValuesRecordsReceived(consumerConfig, topic, expectedNumRecords, DEFAULT_TIMEOUT); } @@ -187,14 +195,14 @@ public static List waitUntilMinValuesRecordsReceived(Properties consumerC * @return All the records consumed, or null if no records are consumed * @throws AssertionError if the given wait time elapses */ - public static List waitUntilMinValuesRecordsReceived(Properties consumerConfig, - String topic, - int expectedNumRecords, - long waitTime) throws InterruptedException { - List accumData = new ArrayList<>(); - long startTime = System.currentTimeMillis(); + public static List waitUntilMinValuesRecordsReceived(final Properties consumerConfig, + final String topic, + final int expectedNumRecords, + final long waitTime) throws InterruptedException { + final List accumData = new ArrayList<>(); + final long startTime = System.currentTimeMillis(); while (true) { - List readData = readValues(topic, consumerConfig, expectedNumRecords); + final List readData = readValues(topic, consumerConfig, expectedNumRecords); accumData.addAll(readData); if (accumData.size() >= expectedNumRecords) return accumData; @@ -226,7 +234,7 @@ public static T waitUntilStoreIsQueryable(final String storeName, while (true) { try { return streams.store(storeName, queryableStoreType); - } catch (InvalidStateStoreException ignored) { + } catch (final InvalidStateStoreException ignored) { // store not yet ready for querying Thread.sleep(50); } @@ -241,9 +249,10 @@ public static T waitUntilStoreIsQueryable(final String storeName, * @param the store's key type * @param the store's value type */ - public static void assertThatKeyValueStoreContains(ReadOnlyKeyValueStore store, Map expected) { - for (K key : expected.keySet()) { - V actualValue = store.get(key); + public static void assertThatKeyValueStoreContains(final ReadOnlyKeyValueStore store, + final Map expected) { + for (final K key : expected.keySet()) { + final V actualValue = store.get(key); assertThat(actualValue).isEqualTo(expected.get(key)); } } @@ -256,19 +265,20 @@ public static void 
assertThatKeyValueStoreContains(ReadOnlyKeyValueStore< * @param the store's key type * @param the store's value type */ - public static void assertThatOldestWindowContains(ReadOnlyWindowStore store, Map expected) { - long fromBeginningOfTimeMs = 0; - long toNowInProcessingTimeMs = System.currentTimeMillis(); - for (K key : expected.keySet()) { + public static void assertThatOldestWindowContains(final ReadOnlyWindowStore store, + final Map expected) { + final long fromBeginningOfTimeMs = 0; + final long toNowInProcessingTimeMs = System.currentTimeMillis(); + for (final K key : expected.keySet()) { long windowCounter = 0; // For each key, `ReadOnlyWindowStore#fetch()` guarantees that the iterator iterates through // the windows in ascending-time order; that is, the first window (if any) is the oldest // available window for that key. - try (WindowStoreIterator iterator = store.fetch(key, fromBeginningOfTimeMs, toNowInProcessingTimeMs)) { + try (final WindowStoreIterator iterator = store.fetch(key, fromBeginningOfTimeMs, toNowInProcessingTimeMs)) { while (iterator.hasNext() && windowCounter <= 1) { windowCounter++; - KeyValue next = iterator.next(); - V actualValue = next.value; + final KeyValue next = iterator.next(); + final V actualValue = next.value; assertThat(actualValue).isEqualTo(expected.get(key)); } } diff --git a/src/test/java/io/confluent/examples/streams/MapFunctionLambdaIntegrationTest.java b/src/test/java/io/confluent/examples/streams/MapFunctionLambdaIntegrationTest.java index 63895216c0..ae6433bb47 100644 --- a/src/test/java/io/confluent/examples/streams/MapFunctionLambdaIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/MapFunctionLambdaIntegrationTest.java @@ -59,32 +59,32 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldUppercaseTheInput() throws Exception { - List inputValues = Arrays.asList("hello", "world"); - List expectedValues = inputValues.stream().map(String::toUpperCase).collect(Collectors.toList()); + final List inputValues = Arrays.asList("hello", "world"); + final List expectedValues = inputValues.stream().map(String::toUpperCase).collect(Collectors.toList()); // // Step 1: Configure and start the processor topology. // - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "map-function-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName()); streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - KStream input = builder.stream(inputTopic); - KStream uppercased = input.mapValues(String::toUpperCase); + final KStream input = builder.stream(inputTopic); + final KStream uppercased = input.mapValues(String::toUpperCase); uppercased.to(outputTopic); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. 
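The `assertThatOldestWindowContains` helper above relies on the guarantee that `ReadOnlyWindowStore#fetch()` iterates windows in ascending-time order, so the first entry (if any) is the oldest window for that key. A small generic sketch of reading just that oldest value; names are illustrative.

```java
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
import org.apache.kafka.streams.state.WindowStoreIterator;

// Sketch: fetch windows from the beginning of time up to "now" and take the first one.
public final class OldestWindowSketch {

  public static <K, V> V oldestValueOrNull(final ReadOnlyWindowStore<K, V> store, final K key) {
    final long fromMs = 0L;                              // beginning of time
    final long toMs = System.currentTimeMillis();        // now, in processing time
    try (final WindowStoreIterator<V> it = store.fetch(key, fromMs, toMs)) {
      return it.hasNext() ? it.next().value : null;      // first window = oldest window
    }
  }
}
```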
// - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -95,13 +95,13 @@ public void shouldUppercaseTheInput() throws Exception { // // Step 3: Verify the application's output data. // - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "map-function-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, + final List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, expectedValues.size()); streams.close(); assertThat(actualValues).isEqualTo(expectedValues); diff --git a/src/test/java/io/confluent/examples/streams/MixAndMatchLambdaIntegrationTest.java b/src/test/java/io/confluent/examples/streams/MixAndMatchLambdaIntegrationTest.java index b7514034c4..f5fc74f59b 100644 --- a/src/test/java/io/confluent/examples/streams/MixAndMatchLambdaIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/MixAndMatchLambdaIntegrationTest.java @@ -85,7 +85,7 @@ private static class AnonymizeIpAddressTransformer implements Transformer transform(final byte[] recordKey, final String recordValue) { // The record value contains the IP address in string representation. // The original record key is ignored because we don't need it for this logic. - String anonymizedIpAddress = anonymizeIpAddress(recordValue); + final String anonymizedIpAddress = anonymizeIpAddress(recordValue); return KeyValue.pair(recordKey, anonymizedIpAddress); } @@ -107,12 +107,12 @@ public KeyValue transform(final byte[] recordKey, final String r * @param ipAddress The IPv4 address * @return Anonymized IPv4 address. */ - private String anonymizeIpAddress(String ipAddress) { + private String anonymizeIpAddress(final String ipAddress) { return ipv4AddressPattern.matcher(ipAddress).replaceAll("${keep}XXX"); } @Override - public KeyValue punctuate(long timestamp) { + public KeyValue punctuate(final long timestamp) { // We don't need any periodic actions in this transformer. Returning null achieves that. return null; } @@ -127,34 +127,34 @@ public void close() { @Test public void shouldAnonymizeTheInput() throws Exception { - List inputValues = Arrays.asList("Hello, 1.2.3.4!", "foo 192.168.1.55 bar"); - List expectedValues = Arrays.asList("HELLO, 1.2.3.XXX!", "FOO 192.168.1.XXX BAR"); + final List inputValues = Arrays.asList("Hello, 1.2.3.4!", "foo 192.168.1.55 bar"); + final List expectedValues = Arrays.asList("HELLO, 1.2.3.XXX!", "FOO 192.168.1.XXX BAR"); // // Step 1: Configure and start the processor topology. 
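The `anonymizeIpAddress` method above replaces the last octet of an IPv4 address while keeping the first three via a named capture group. The exact pattern (`ipv4AddressPattern`) is defined elsewhere in that test and is assumed here; only the `"${keep}XXX"` replacement appears in this patch.

```java
import java.util.regex.Pattern;

// Sketch of last-octet IP anonymization with a named group.
public final class AnonymizeIpSketch {

  // Assumed pattern: capture "a.b.c." in the group "keep", then match the last octet.
  private static final Pattern IPV4 =
      Pattern.compile("(?<keep>[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.)[0-9]{1,3}");

  public static String anonymize(final String text) {
    return IPV4.matcher(text).replaceAll("${keep}XXX");
  }

  public static void main(final String[] args) {
    System.out.println(anonymize("Hello, 1.2.3.4!")); // -> Hello, 1.2.3.XXX!
  }
}
```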
// - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "mix-and-match-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName()); streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - KStream input = builder.stream(inputTopic); - KStream uppercasedAndAnonymized = input + final KStream input = builder.stream(inputTopic); + final KStream uppercasedAndAnonymized = input .mapValues(String::toUpperCase) .transform(AnonymizeIpAddressTransformer::new); uppercasedAndAnonymized.to(outputTopic); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. // - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -165,13 +165,13 @@ public void shouldAnonymizeTheInput() throws Exception { // // Step 3: Verify the application's output data. // - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "mix-and-match-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, + final List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, expectedValues.size()); streams.close(); assertThat(actualValues).isEqualTo(expectedValues); diff --git a/src/test/java/io/confluent/examples/streams/PassThroughIntegrationTest.java b/src/test/java/io/confluent/examples/streams/PassThroughIntegrationTest.java index 769304f468..cfea4f01c6 100644 --- a/src/test/java/io/confluent/examples/streams/PassThroughIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/PassThroughIntegrationTest.java @@ -54,7 +54,7 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldWriteTheInputDataAsIsToTheOutputTopic() throws Exception { - List inputValues = Arrays.asList( + final List inputValues = Arrays.asList( "hello world", "the world is not enough", "the world of the stock market is coming to an end" @@ -63,9 +63,9 @@ public void shouldWriteTheInputDataAsIsToTheOutputTopic() throws Exception { // // Step 1: Configure and start the processor topology. 
// - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "pass-through-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); @@ -75,13 +75,13 @@ public void shouldWriteTheInputDataAsIsToTheOutputTopic() throws Exception { // Write the input data as-is to the output topic. builder.stream(inputTopic).to(outputTopic); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. // - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -92,13 +92,13 @@ public void shouldWriteTheInputDataAsIsToTheOutputTopic() throws Exception { // // Step 3: Verify the application's output data. // - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "pass-through-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, + final List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, inputValues.size()); streams.close(); assertThat(actualValues).isEqualTo(inputValues); diff --git a/src/test/java/io/confluent/examples/streams/SpecificAvroIntegrationTest.java b/src/test/java/io/confluent/examples/streams/SpecificAvroIntegrationTest.java index 725d2c1de3..87f74b8ad9 100644 --- a/src/test/java/io/confluent/examples/streams/SpecificAvroIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/SpecificAvroIntegrationTest.java @@ -64,16 +64,16 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldRoundTripSpecificAvroDataThroughKafka() throws Exception { - List inputValues = Collections.singletonList( + final List inputValues = Collections.singletonList( WikiFeed.newBuilder().setUser("alice").setIsNew(true).setContent("lorem ipsum").build() ); // // Step 1: Configure and start the processor topology. 
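`SpecificAvroIntegrationTest` above builds a `WikiFeed` record with its generated builder and configures a `SpecificAvroSerde` against Schema Registry. A sketch of that setup; the import paths and the registry URL are assumptions (they should match the Avro serde artifact and generated classes used elsewhere in this repo).

```java
import java.util.Collections;

import io.confluent.examples.streams.avro.WikiFeed;
import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;

// Sketch: a specific-record value serde plus a sample record.
public final class SpecificAvroSketch {

  public static SpecificAvroSerde<WikiFeed> wikiFeedSerde() {
    final SpecificAvroSerde<WikiFeed> serde = new SpecificAvroSerde<>();
    final boolean isKeySerde = false; // this serde is used for record values
    serde.configure(
        Collections.singletonMap(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG,
            "http://localhost:8081"),
        isKeySerde);
    return serde;
  }

  public static WikiFeed sampleValue() {
    return WikiFeed.newBuilder().setUser("alice").setIsNew(true).setContent("lorem ipsum").build();
  }
}
```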
// - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "specific-avro-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName()); @@ -100,16 +100,16 @@ public void shouldRoundTripSpecificAvroDataThroughKafka() throws Exception { specificAvroSerde.configure( Collections.singletonMap(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl()), isKeySerde); - KStream stream = builder.stream(inputTopic); + final KStream stream = builder.stream(inputTopic); stream.to(outputTopic, Produced.with(stringSerde, specificAvroSerde)); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. // - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -121,7 +121,7 @@ public void shouldRoundTripSpecificAvroDataThroughKafka() throws Exception { // // Step 3: Verify the application's output data. // - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "specific-avro-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); @@ -129,7 +129,7 @@ public void shouldRoundTripSpecificAvroDataThroughKafka() throws Exception { consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class); consumerConfig.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl()); consumerConfig.put(KafkaAvroDeserializerConfig.SPECIFIC_AVRO_READER_CONFIG, true); - List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, + final List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, inputValues.size()); streams.close(); assertEquals(inputValues, actualValues); diff --git a/src/test/java/io/confluent/examples/streams/StateStoresInTheDSLIntegrationTest.java b/src/test/java/io/confluent/examples/streams/StateStoresInTheDSLIntegrationTest.java index 4543be5ecd..f78b72f66a 100644 --- a/src/test/java/io/confluent/examples/streams/StateStoresInTheDSLIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/StateStoresInTheDSLIntegrationTest.java @@ -79,7 +79,7 @@ private static final class WordCountTransformerSupplier final private String stateStoreName; - public WordCountTransformerSupplier(String stateStoreName) { + public WordCountTransformerSupplier(final String stateStoreName) { this.stateStoreName = stateStoreName; } @@ -91,23 +91,23 @@ public Transformer> get() { @SuppressWarnings("unchecked") @Override - public void init(ProcessorContext context) { + public void init(final ProcessorContext context) { 
stateStore = (KeyValueStore) context.getStateStore(stateStoreName); } @Override - public KeyValue transform(byte[] key, String value) { + public KeyValue transform(final byte[] key, final String value) { // For simplification (and unlike the traditional wordcount) we assume that the value is // a single word, i.e. we don't split the value by whitespace into potentially one or more // words. - Optional count = Optional.ofNullable(stateStore.get(value)); - Long incrementedCount = count.orElse(0L) + 1; + final Optional count = Optional.ofNullable(stateStore.get(value)); + final Long incrementedCount = count.orElse(0L) + 1; stateStore.put(value, incrementedCount); return KeyValue.pair(value, incrementedCount); } @Override - public KeyValue punctuate(long timestamp) { + public KeyValue punctuate(final long timestamp) { // Not needed return null; } @@ -124,7 +124,7 @@ public void close() { @Test public void shouldAllowStateStoreAccessFromDSL() throws Exception { - List inputValues = Arrays.asList( + final List inputValues = Arrays.asList( "foo", "bar", "foo", @@ -132,7 +132,7 @@ public void shouldAllowStateStoreAccessFromDSL() throws Exception { "bar", "foo"); - List> expectedRecords = Arrays.asList( + final List> expectedRecords = Arrays.asList( new KeyValue<>("foo", 1L), new KeyValue<>("bar", 1L), new KeyValue<>("foo", 2L), @@ -144,9 +144,9 @@ public void shouldAllowStateStoreAccessFromDSL() throws Exception { // // Step 1: Configure and start the processor topology. // - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "state-store-dsl-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName()); @@ -156,7 +156,7 @@ public void shouldAllowStateStoreAccessFromDSL() throws Exception { streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); // Create a state store manually. - StoreBuilder> wordCountsStore = Stores.keyValueStoreBuilder( + final StoreBuilder> wordCountsStore = Stores.keyValueStoreBuilder( Stores.persistentKeyValueStore("WordCountsStore"), Serdes.String(), Serdes.Long()) @@ -167,7 +167,7 @@ public void shouldAllowStateStoreAccessFromDSL() throws Exception { builder.addStateStore(wordCountsStore); // Read the input data. (In this example we ignore whatever is stored in the record keys.) - KStream words = builder.stream(inputTopic); + final KStream words = builder.stream(inputTopic); // Important (2 of 2): When we call `transform()` we must provide the name of the state store // that is going to be used by the `Transformer` returned by `WordCountTransformerSupplier` as @@ -176,18 +176,18 @@ public void shouldAllowStateStoreAccessFromDSL() throws Exception { // Otherwise our application will fail at run-time when attempting to operate on the state store // (within the transformer) because `ProcessorContext#getStateStore("WordCountsStore")` will // return `null`. 
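The comment above spells out the wiring rule for mixing the DSL with the Processor API: the state store must be registered on the builder *and* named again in `transform()`, otherwise `ProcessorContext#getStateStore()` returns `null` at run time. A condensed sketch of that wiring, reusing the `WordCountTransformerSupplier` defined in the test; topic names are placeholders.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

// Sketch: register a store, then hand its name to transform().
public final class TransformWithStoreSketch {

  public static void build(final StreamsBuilder builder) {
    final StoreBuilder<KeyValueStore<String, Long>> wordCountsStore =
        Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore("WordCountsStore"),
            Serdes.String(),
            Serdes.Long());
    builder.addStateStore(wordCountsStore);                        // (1) register the store

    final KStream<byte[], String> words = builder.stream("inputTopic");
    final KStream<String, Long> wordCounts = words.transform(
        new WordCountTransformerSupplier(wordCountsStore.name()),
        wordCountsStore.name());                                   // (2) attach it to the transformer
    wordCounts.to("outputTopic", Produced.with(Serdes.String(), Serdes.Long()));
  }
}
```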
- KStream wordCounts = + final KStream wordCounts = words.transform(new WordCountTransformerSupplier(wordCountsStore.name()), wordCountsStore.name()); wordCounts.to(outputTopic, Produced.with(Serdes.String(), Serdes.Long())); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. // - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -198,13 +198,13 @@ public void shouldAllowStateStoreAccessFromDSL() throws Exception { // // Step 3: Verify the application's output data. // - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "state-store-dsl-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class); - List> actualValues = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, + final List> actualValues = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedRecords.size()); streams.close(); assertThat(actualValues).isEqualTo(expectedRecords); diff --git a/src/test/java/io/confluent/examples/streams/StreamToStreamJoinIntegrationTest.java b/src/test/java/io/confluent/examples/streams/StreamToStreamJoinIntegrationTest.java index a8c14b225f..fbebcf6efc 100644 --- a/src/test/java/io/confluent/examples/streams/StreamToStreamJoinIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/StreamToStreamJoinIntegrationTest.java @@ -65,20 +65,20 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldJoinTwoStreams() throws Exception { // Input 1: Ad impressions - List> inputAdImpressions = Arrays.asList( + final List> inputAdImpressions = Arrays.asList( new KeyValue<>("car-advertisement", "shown"), new KeyValue<>("newspaper-advertisement", "shown"), new KeyValue<>("gadget-advertisement", "shown") ); // Input 2: Ad clicks - List> inputAdClicks = Arrays.asList( + final List> inputAdClicks = Arrays.asList( new KeyValue<>("newspaper-advertisement", "clicked"), new KeyValue<>("gadget-advertisement", "clicked"), new KeyValue<>("newspaper-advertisement", "clicked") ); - List> expectedResults = Arrays.asList( + final List> expectedResults = Arrays.asList( new KeyValue<>("car-advertisement", "shown/null"), new KeyValue<>("newspaper-advertisement", "shown/null"), new KeyValue<>("gadget-advertisement", "shown/null"), @@ -92,7 +92,7 @@ public void shouldJoinTwoStreams() throws Exception { // final Serde stringSerde = Serdes.String(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-stream-join-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); 
streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); @@ -104,15 +104,15 @@ public void shouldJoinTwoStreams() throws Exception { // Use a temporary directory for storing state, which will be automatically removed after the test. streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); - StreamsBuilder builder = new StreamsBuilder(); - KStream alerts = builder.stream(adImpressionsTopic); - KStream incidents = builder.stream(adClicksTopic); + final StreamsBuilder builder = new StreamsBuilder(); + final KStream alerts = builder.stream(adImpressionsTopic); + final KStream incidents = builder.stream(adClicksTopic); // In this example, we opt to perform an OUTER JOIN between the two streams. We picked this // join type to show how the Streams API will send further join updates downstream whenever, // for the same join key (e.g. "newspaper-advertisement"), we receive an update from either of // the two joined streams during the defined join window. - KStream impressionsAndClicks = alerts.outerJoin(incidents, + final KStream impressionsAndClicks = alerts.outerJoin(incidents, (impressionValue, clickValue) -> impressionValue + "/" + clickValue, // KStream-KStream joins are always windowed joins, hence we must provide a join window. JoinWindows.of(TimeUnit.SECONDS.toMillis(5))); @@ -120,13 +120,13 @@ public void shouldJoinTwoStreams() throws Exception { // Write the results to the output topic. impressionsAndClicks.to(outputTopic); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Publish ad impressions. // - Properties alertsProducerConfig = new Properties(); + final Properties alertsProducerConfig = new Properties(); alertsProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); alertsProducerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); alertsProducerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -137,7 +137,7 @@ public void shouldJoinTwoStreams() throws Exception { // // Step 3: Publish ad clicks. // - Properties incidentsProducerConfig = new Properties(); + final Properties incidentsProducerConfig = new Properties(); incidentsProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); incidentsProducerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); incidentsProducerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -148,13 +148,13 @@ public void shouldJoinTwoStreams() throws Exception { // // Step 4: Verify the application's output data. 
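The "shown/null" entries in expectedResults are intentional: with an outer join, an impression that arrives before its matching click is emitted immediately with null on the click side, and an updated "shown/clicked" record for the same key follows once the click lands inside the 5-second join window. A hedged sketch of a joiner that makes the unmatched case explicit (purely illustrative; the test deliberately keeps the literal null so the intermediate join results are visible in the assertions):

    // Illustrative ValueJoiner; click is null until a matching click arrives within the window.
    final ValueJoiner<String, String, String> impressionClickJoiner =
        (impression, click) -> impression + "/" + (click == null ? "pending" : click);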
// - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "stream-stream-join-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - List> actualResults = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, + final List> actualResults = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedResults.size()); streams.close(); assertThat(actualResults).containsExactlyElementsOf(expectedResults); diff --git a/src/test/java/io/confluent/examples/streams/StreamToTableJoinIntegrationTest.java b/src/test/java/io/confluent/examples/streams/StreamToTableJoinIntegrationTest.java index 63edc821fa..5153f507c2 100644 --- a/src/test/java/io/confluent/examples/streams/StreamToTableJoinIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/StreamToTableJoinIntegrationTest.java @@ -76,7 +76,7 @@ private static final class RegionWithClicks { private final String region; private final long clicks; - public RegionWithClicks(String region, long clicks) { + public RegionWithClicks(final String region, final long clicks) { if (region == null || region.isEmpty()) { throw new IllegalArgumentException("region must be set"); } @@ -100,7 +100,7 @@ public long getClicks() { @Test public void shouldCountClicksPerRegion() throws Exception { // Input 1: Clicks per user (multiple records allowed per user). - List> userClicks = Arrays.asList( + final List> userClicks = Arrays.asList( new KeyValue<>("alice", 13L), new KeyValue<>("bob", 4L), new KeyValue<>("chao", 25L), @@ -112,7 +112,7 @@ public void shouldCountClicksPerRegion() throws Exception { ); // Input 2: Region per user (multiple records allowed per user). - List> userRegions = Arrays.asList( + final List> userRegions = Arrays.asList( new KeyValue<>("alice", "asia"), /* Alice lived in Asia originally... */ new KeyValue<>("bob", "americas"), new KeyValue<>("chao", "asia"), @@ -122,7 +122,7 @@ public void shouldCountClicksPerRegion() throws Exception { new KeyValue<>("fang", "asia") ); - List> expectedClicksPerRegion = Arrays.asList( + final List> expectedClicksPerRegion = Arrays.asList( new KeyValue<>("americas", 101L), new KeyValue<>("europe", 109L), new KeyValue<>("asia", 124L) @@ -134,7 +134,7 @@ public void shouldCountClicksPerRegion() throws Exception { final Serde stringSerde = Serdes.String(); final Serde longSerde = Serdes.Long(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-table-join-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); @@ -146,13 +146,13 @@ public void shouldCountClicksPerRegion() throws Exception { // Use a temporary directory for storing state, which will be automatically removed after the test. 
streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); // This KStream contains information such as "alice" -> 13L. // // Because this is a KStream ("record stream"), multiple records for the same user will be // considered as separate click-count events, each of which will be added to the total count. - KStream userClicksStream = builder.stream(userClicksTopic, Consumed.with(stringSerde, longSerde)); + final KStream userClicksStream = builder.stream(userClicksTopic, Consumed.with(stringSerde, longSerde)); // This KTable contains information such as "alice" -> "europe". // @@ -165,13 +165,13 @@ public void shouldCountClicksPerRegion() throws Exception { // lived in "asia") because, at the time her first user-click record is being received and // subsequently processed in the `leftJoin`, the latest region update for "alice" is "europe" // (which overrides her previous region value of "asia"). - KTable userRegionsTable = builder.table(userRegionsTopic); + final KTable userRegionsTable = builder.table(userRegionsTopic); // Compute the number of clicks per region, e.g. "europe" -> 13L. // // The resulting KTable is continuously being updated as new data records are arriving in the // input KStream `userClicksStream` and input KTable `userRegionsTable`. - KTable clicksPerRegion = userClicksStream + final KTable clicksPerRegion = userClicksStream // Join the stream against the table. // // Null values possible: In general, null values are possible for region (i.e. the value of @@ -193,7 +193,7 @@ public void shouldCountClicksPerRegion() throws Exception { // Write the (continuously updating) results to the output topic. clicksPerRegion.toStream().to(outputTopic, Produced.with(stringSerde, longSerde)); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // @@ -202,7 +202,7 @@ public void shouldCountClicksPerRegion() throws Exception { // To keep this code example simple and easier to understand/reason about, we publish all // user-region records before any user-click records (cf. step 3). In practice though, // data records would typically be arriving concurrently in both input streams/topics. - Properties userRegionsProducerConfig = new Properties(); + final Properties userRegionsProducerConfig = new Properties(); userRegionsProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); userRegionsProducerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); userRegionsProducerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -213,7 +213,7 @@ public void shouldCountClicksPerRegion() throws Exception { // // Step 3: Publish some user click events. // - Properties userClicksProducerConfig = new Properties(); + final Properties userClicksProducerConfig = new Properties(); userClicksProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); userClicksProducerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); userClicksProducerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -225,13 +225,13 @@ public void shouldCountClicksPerRegion() throws Exception { // // Step 4: Verify the application's output data. 
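The join-and-aggregate chain itself is elided between these hunks. Roughly, and only as a sketch (it assumes the RegionWithClicks helper defined earlier in this test; the exact code is not reproduced in the diff), the computation joins each click event against the region table, re-keys by region, and sums the clicks:

    // Sketch: KStream-KTable left join, re-key by region, then sum clicks per region.
    final KTable<String, Long> clicksPerRegion = userClicksStream
        .leftJoin(userRegionsTable,
            (clicks, region) -> new RegionWithClicks(region == null ? "UNKNOWN" : region, clicks))
        .map((user, regionWithClicks) ->
            new KeyValue<>(regionWithClicks.getRegion(), regionWithClicks.getClicks()))
        .groupByKey(Serialized.with(stringSerde, longSerde))  // explicit serdes for the repartition
        .reduce((v1, v2) -> v1 + v2);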
// - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "join-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class); - List> actualClicksPerRegion = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, + final List> actualClicksPerRegion = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedClicksPerRegion.size()); streams.close(); assertThat(actualClicksPerRegion).containsExactlyElementsOf(expectedClicksPerRegion); diff --git a/src/test/java/io/confluent/examples/streams/SumLambdaIntegrationTest.java b/src/test/java/io/confluent/examples/streams/SumLambdaIntegrationTest.java index d44231ce31..e5cd150b6b 100644 --- a/src/test/java/io/confluent/examples/streams/SumLambdaIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/SumLambdaIntegrationTest.java @@ -60,15 +60,15 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldSumEvenNumbers() throws Exception { - List inputValues = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - List expectedValues = Collections.singletonList(30); + final List inputValues = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + final List expectedValues = Collections.singletonList(30); // // Step 1: Configure and start the processor topology. // - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "sum-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName()); @@ -80,8 +80,8 @@ public void shouldSumEvenNumbers() throws Exception { // Use a temporary directory for storing state, which will be automatically removed after the test. streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); - KStream input = builder.stream(inputTopic); - KTable sumOfOddNumbers = input + final KStream input = builder.stream(inputTopic); + final KTable sumOfOddNumbers = input .filter((k, v) -> v % 2 == 0) .selectKey((k, v) -> 1) // no need to specify explicit serdes because the resulting key and value types match our default serde settings @@ -89,13 +89,13 @@ public void shouldSumEvenNumbers() throws Exception { .reduce((v1, v2) -> v1 + v2); sumOfOddNumbers.toStream().to(outputTopic); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. 
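A quick sanity check on expectedValues: the filter keeps the even inputs, selectKey((k, v) -> 1) funnels them all under one constant key, and reduce() then maintains a single running sum, 2 + 4 + 6 + 8 + 10 = 30, which is exactly the lone value asserted below (note that the variable is named sumOfOddNumbers even though the predicate v % 2 == 0 keeps even values).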
// - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -106,13 +106,13 @@ public void shouldSumEvenNumbers() throws Exception { // // Step 3: Verify the application's output data. // - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "sum-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class); - List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, + final List actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, expectedValues.size()); streams.close(); assertThat(actualValues).isEqualTo(expectedValues); diff --git a/src/test/java/io/confluent/examples/streams/TableToTableJoinIntegrationTest.java b/src/test/java/io/confluent/examples/streams/TableToTableJoinIntegrationTest.java index a40da4d8ee..4dce6a6fa2 100644 --- a/src/test/java/io/confluent/examples/streams/TableToTableJoinIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/TableToTableJoinIntegrationTest.java @@ -71,7 +71,7 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldJoinTwoTables() throws Exception { // Input: Region per user (multiple records allowed per user). 
- List> userRegionRecords = Arrays.asList( + final List> userRegionRecords = Arrays.asList( new KeyValue<>("alice", "asia"), new KeyValue<>("bob", "europe"), new KeyValue<>("alice", "europe"), @@ -80,21 +80,21 @@ public void shouldJoinTwoTables() throws Exception { ); // Input 2: Timestamp of last login per user (multiple records allowed per user) - List> userLastLoginRecords = Arrays.asList( + final List> userLastLoginRecords = Arrays.asList( new KeyValue<>("alice", 1485500000L), new KeyValue<>("bob", 1485520000L), new KeyValue<>("alice", 1485530000L), new KeyValue<>("bob", 1485560000L) ); - List> expectedResults = Arrays.asList( + final List> expectedResults = Arrays.asList( new KeyValue<>("alice", "europe/1485500000"), new KeyValue<>("bob", "asia/1485520000"), new KeyValue<>("alice", "europe/1485530000"), new KeyValue<>("bob", "asia/1485560000") ); - List> expectedResultsForJoinStateStore = Arrays.asList( + final List> expectedResultsForJoinStateStore = Arrays.asList( new KeyValue<>("alice", "europe/1485530000"), new KeyValue<>("bob", "asia/1485560000") ); @@ -105,7 +105,7 @@ public void shouldJoinTwoTables() throws Exception { final Serde stringSerde = Serdes.String(); final Serde longSerde = Serdes.Long(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "table-table-join-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); @@ -119,11 +119,11 @@ public void shouldJoinTwoTables() throws Exception { // Use a temporary directory for storing state, which will be automatically removed after the test. streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); - StreamsBuilder builder = new StreamsBuilder(); - KTable userRegions = builder.table(userRegionTopic); - KTable userLastLogins = builder.table(userLastLoginTopic, Consumed.with(stringSerde, longSerde)); + final StreamsBuilder builder = new StreamsBuilder(); + final KTable userRegions = builder.table(userRegionTopic); + final KTable userLastLogins = builder.table(userLastLoginTopic, Consumed.with(stringSerde, longSerde)); - String storeName = "joined-store"; + final String storeName = "joined-store"; userRegions.join(userLastLogins, (regionValue, lastLoginValue) -> regionValue + "/" + lastLoginValue, Materialized.as(storeName)) @@ -131,13 +131,13 @@ public void shouldJoinTwoTables() throws Exception { .to(outputTopic, Produced.with(Serdes.String(), Serdes.String())); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Publish user regions. // - Properties regionsProducerConfig = new Properties(); + final Properties regionsProducerConfig = new Properties(); regionsProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); regionsProducerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); regionsProducerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -148,7 +148,7 @@ public void shouldJoinTwoTables() throws Exception { // // Step 3: Publish user's last login timestamps. 
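Because the join result above is materialized under the name "joined-store", its current contents can be read back through an interactive query on the local state store, which is how the test later verifies expectedResultsForJoinStateStore. A sketch of that lookup, assuming the streams instance is running and the store has become queryable:

    // Interactive query on the materialized join result; the store holds the latest value per key.
    final ReadOnlyKeyValueStore<String, String> store =
        streams.store("joined-store", QueryableStoreTypes.<String, String>keyValueStore());
    try (final KeyValueIterator<String, String> all = store.all()) {
      all.forEachRemaining(kv -> System.out.println(kv.key + " -> " + kv.value));
    }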
// - Properties lastLoginProducerConfig = new Properties(); + final Properties lastLoginProducerConfig = new Properties(); lastLoginProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); lastLoginProducerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); lastLoginProducerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -159,20 +159,20 @@ public void shouldJoinTwoTables() throws Exception { // // Step 4: Verify the application's output data. // - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "stream-stream-join-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - List> actualResults = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, + final List> actualResults = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedResults.size()); // Verify the (local) state store of the joined table. // For a comprehensive demonstration of interactive queries please refer to KafkaMusicExample. - ReadOnlyKeyValueStore readOnlyKeyValueStore = + final ReadOnlyKeyValueStore readOnlyKeyValueStore = streams.store(storeName, QueryableStoreTypes.keyValueStore()); - KeyValueIterator keyValueIterator = readOnlyKeyValueStore.all(); + final KeyValueIterator keyValueIterator = readOnlyKeyValueStore.all(); assertThat(keyValueIterator).containsExactlyElementsOf(expectedResultsForJoinStateStore); streams.close(); diff --git a/src/test/java/io/confluent/examples/streams/UserCountsPerRegionLambdaIntegrationTest.java b/src/test/java/io/confluent/examples/streams/UserCountsPerRegionLambdaIntegrationTest.java index a9a73c9699..6251b91fba 100644 --- a/src/test/java/io/confluent/examples/streams/UserCountsPerRegionLambdaIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/UserCountsPerRegionLambdaIntegrationTest.java @@ -76,7 +76,7 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldCountUsersPerRegion() throws Exception { // Input: Region per user (multiple records allowed per user). - List> userRegionRecords = Arrays.asList( + final List> userRegionRecords = Arrays.asList( // This first record for Alice tells us that she is currently in Asia. new KeyValue<>("alice", "asia"), // First record for Bob. 
@@ -89,7 +89,7 @@ public void shouldCountUsersPerRegion() throws Exception { new KeyValue<>("bob", "asia") ); - List> expectedUsersPerRegion = Arrays.asList( + final List> expectedUsersPerRegion = Arrays.asList( new KeyValue<>("europe", 1L), // in the end, Alice is in europe new KeyValue<>("asia", 1L) // in the end, Bob is in asia ); @@ -100,7 +100,7 @@ public void shouldCountUsersPerRegion() throws Exception { final Serde stringSerde = Serdes.String(); final Serde longSerde = Serdes.Long(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "user-regions-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); @@ -112,24 +112,24 @@ public void shouldCountUsersPerRegion() throws Exception { // Use a temporary directory for storing state, which will be automatically removed after the test. streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - KTable userRegionsTable = builder.table(inputTopic); + final KTable userRegionsTable = builder.table(inputTopic); - KTable usersPerRegionTable = userRegionsTable + final KTable usersPerRegionTable = userRegionsTable // no need to specify explicit serdes because the resulting key and value types match our default serde settings .groupBy((userId, region) -> KeyValue.pair(region, region)) .count(); usersPerRegionTable.toStream().to(outputTopic, Produced.with(stringSerde, longSerde)); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Publish user-region information. // - Properties userRegionsProducerConfig = new Properties(); + final Properties userRegionsProducerConfig = new Properties(); userRegionsProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); userRegionsProducerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); userRegionsProducerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -140,13 +140,13 @@ public void shouldCountUsersPerRegion() throws Exception { // // Step 3: Verify the application's output data. 
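Reading the input as a KTable is what keeps the expected counts at one user per region: a later record for the same user replaces the earlier one, and the downstream count is adjusted on both sides of the move (asia is decremented when Alice's record changes to europe). An equivalent, more explicit formulation (illustrative only, not what the test uses) spells out the adder and subtractor that count() applies internally:

    // Sketch: explicit adder/subtractor over the grouped table; count() does this internally.
    userRegionsTable
        .groupBy((userId, region) -> KeyValue.pair(region, region))
        .aggregate(
            () -> 0L,                                        // initializer
            (region, value, aggregate) -> aggregate + 1L,    // a user now lives in this region
            (region, value, aggregate) -> aggregate - 1L,    // the user's old region is retracted
            Materialized.with(Serdes.String(), Serdes.Long()));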
// - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "user-regions-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class); - List> actualClicksPerRegion = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, + final List> actualClicksPerRegion = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedUsersPerRegion.size()); streams.close(); assertThat(actualClicksPerRegion).containsExactlyElementsOf(expectedUsersPerRegion); diff --git a/src/test/java/io/confluent/examples/streams/ValidateStateWithInteractiveQueriesLambdaIntegrationTest.java b/src/test/java/io/confluent/examples/streams/ValidateStateWithInteractiveQueriesLambdaIntegrationTest.java index c06b680601..89debba8e2 100644 --- a/src/test/java/io/confluent/examples/streams/ValidateStateWithInteractiveQueriesLambdaIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/ValidateStateWithInteractiveQueriesLambdaIntegrationTest.java @@ -63,7 +63,7 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldComputeMaxValuePerKey() throws Exception { // A user may be listed multiple times. - List> inputUserClicks = Arrays.asList( + final List> inputUserClicks = Arrays.asList( new KeyValue<>("alice", 13L), new KeyValue<>("bob", 4L), new KeyValue<>("chao", 25L), @@ -74,7 +74,7 @@ public void shouldComputeMaxValuePerKey() throws Exception { new KeyValue<>("bob", 3L) ); - Map expectedMaxClicksPerUser = new HashMap() { + final Map expectedMaxClicksPerUser = new HashMap() { { put("alice", 78L); put("bob", 19L); @@ -85,9 +85,9 @@ public void shouldComputeMaxValuePerKey() throws Exception { // // Step 1: Configure and start the processor topology. // - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "validating-with-interactive-queries-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); @@ -99,10 +99,10 @@ public void shouldComputeMaxValuePerKey() throws Exception { // Use a temporary directory for storing state, which will be automatically removed after the test. 
streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); - KStream input = builder.stream(inputTopic); + final KStream input = builder.stream(inputTopic); // rolling MAX() aggregation - String maxStore = "max-store"; + final String maxStore = "max-store"; input.groupByKey().aggregate( () -> Long.MIN_VALUE, (aggKey, value, aggregate) -> Math.max(value, aggregate), @@ -110,7 +110,7 @@ public void shouldComputeMaxValuePerKey() throws Exception { ); // windowed MAX() aggregation - String maxWindowStore = "max-window-store"; + final String maxWindowStore = "max-window-store"; input.groupByKey() .windowedBy(TimeWindows.of(TimeUnit.MINUTES.toMillis(1L)).until(TimeUnit.MINUTES.toMillis(5L))) .aggregate( @@ -118,13 +118,13 @@ public void shouldComputeMaxValuePerKey() throws Exception { (aggKey, value, aggregate) -> Math.max(value, aggregate), Materialized.as(maxWindowStore)); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. // - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -135,9 +135,9 @@ public void shouldComputeMaxValuePerKey() throws Exception { // // Step 3: Validate the application's state by interactively querying its state stores. // - ReadOnlyKeyValueStore keyValueStore = + final ReadOnlyKeyValueStore keyValueStore = IntegrationTestUtils.waitUntilStoreIsQueryable(maxStore, QueryableStoreTypes.keyValueStore(), streams); - ReadOnlyWindowStore windowStore = + final ReadOnlyWindowStore windowStore = IntegrationTestUtils.waitUntilStoreIsQueryable(maxWindowStore, QueryableStoreTypes.windowStore(), streams); // Wait a bit so that the input data can be fully processed to ensure that the stores can diff --git a/src/test/java/io/confluent/examples/streams/WordCountLambdaIntegrationTest.java b/src/test/java/io/confluent/examples/streams/WordCountLambdaIntegrationTest.java index dc43d4c61c..177a9de727 100644 --- a/src/test/java/io/confluent/examples/streams/WordCountLambdaIntegrationTest.java +++ b/src/test/java/io/confluent/examples/streams/WordCountLambdaIntegrationTest.java @@ -68,13 +68,13 @@ public static void startKafkaCluster() throws Exception { @Test public void shouldCountWords() throws Exception { - List inputValues = Arrays.asList( + final List inputValues = Arrays.asList( "Hello Kafka Streams", "All streams lead to Kafka", "Join Kafka Summit", "И теперь пошли русские слова" ); - List> expectedWordCounts = Arrays.asList( + final List> expectedWordCounts = Arrays.asList( new KeyValue<>("hello", 1L), new KeyValue<>("all", 1L), new KeyValue<>("streams", 2L), @@ -96,7 +96,7 @@ public void shouldCountWords() throws Exception { final Serde stringSerde = Serdes.String(); final Serde longSerde = Serdes.Long(); - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-lambda-integration-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, 
Serdes.String().getClass().getName()); @@ -108,13 +108,13 @@ public void shouldCountWords() throws Exception { // Use a temporary directory for storing state, which will be automatically removed after the test. streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); - StreamsBuilder builder = new StreamsBuilder(); + final StreamsBuilder builder = new StreamsBuilder(); - KStream textLines = builder.stream(inputTopic); + final KStream textLines = builder.stream(inputTopic); - Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS); + final Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS); - KTable wordCounts = textLines + final KTable wordCounts = textLines .flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase()))) // no need to specify explicit serdes because the resulting key and value types match our default serde settings .groupBy((key, word) -> word) @@ -122,13 +122,13 @@ public void shouldCountWords() throws Exception { wordCounts.toStream().to(outputTopic, Produced.with(stringSerde, longSerde)); - KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); + final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); streams.start(); // // Step 2: Produce some input data to the input topic. // - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -139,13 +139,13 @@ public void shouldCountWords() throws Exception { // // Step 3: Verify the application's output data. 
// - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "wordcount-lambda-integration-test-standard-consumer"); consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class); - List> actualWordCounts = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, + final List> actualWordCounts = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedWordCounts.size()); streams.close(); assertThat(actualWordCounts).containsExactlyElementsOf(expectedWordCounts); diff --git a/src/test/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesExampleTest.java b/src/test/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesExampleTest.java index 64da330510..59299cb7d2 100644 --- a/src/test/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesExampleTest.java +++ b/src/test/java/io/confluent/examples/streams/interactivequeries/WordCountInteractiveQueriesExampleTest.java @@ -96,8 +96,8 @@ public void shutdown() throws Exception { } public static int randomFreeLocalPort() throws IOException { - ServerSocket s = new ServerSocket(0); - int port = s.getLocalPort(); + final ServerSocket s = new ServerSocket(0); + final int port = s.getLocalPort(); s.close(); return port; } @@ -113,7 +113,7 @@ public void shouldDemonstrateInteractiveQueries() throws Exception { "streams", "kafka streams"); - Properties producerConfig = new Properties(); + final Properties producerConfig = new Properties(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); producerConfig.put(ProducerConfig.ACKS_CONFIG, "all"); producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0); @@ -166,7 +166,7 @@ public void shouldDemonstrateInteractiveQueries() throws Exception { new HostStoreInfo("localhost", port, Sets.newHashSet("word-count", "windowed-word-count")) )); - Properties consumerConfig = new Properties(); + final Properties consumerConfig = new Properties(); consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); @@ -239,7 +239,7 @@ public void shouldDemonstrateInteractiveQueries() throws Exception { * directly after KafkaStreams.start(), so it can take some time * for the group to stabilize and all stores/instances to be available */ - private List fetchHostInfo(Invocation.Builder request) throws InterruptedException { + private List fetchHostInfo(final Invocation.Builder request) throws InterruptedException { List hostStoreInfo = MicroserviceTestUtils.getWithRetries(request, new GenericType>(){}, 5); final long until = System.currentTimeMillis() + 60000L; while (hostStoreInfo.isEmpty() || @@ -257,7 +257,7 @@ private List fetchRangeOfValues(final Invocation.Builder request, while (!results.containsAll(expectedResults) && System.currentTimeMillis() < timeout) { try { results = MicroserviceTestUtils.getWithRetries(request, new GenericType>() {}, 5); - } catch 
(NotFoundException e) { + } catch (final NotFoundException e) { // } } @@ -268,7 +268,7 @@ private List fetchRangeOfValues(final Invocation.Builder request, private Properties createStreamConfig(final String bootStrap, final int port, final String stateDir) throws IOException { - Properties streamsConfiguration = new Properties(); + final Properties streamsConfiguration = new Properties(); // Give the Streams application a unique name. The name must be unique in the Kafka cluster // against which the application is run. streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "interactive-queries-wordcount-example"); diff --git a/src/test/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExampleTest.java b/src/test/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExampleTest.java index 38f369d7fe..9b07427b78 100644 --- a/src/test/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExampleTest.java +++ b/src/test/java/io/confluent/examples/streams/interactivequeries/kafkamusic/KafkaMusicExampleTest.java @@ -231,7 +231,7 @@ public void shouldCreateChartsAndAccessThemViaInteractiveQueries() throws Except try { songsStore = streams.store(KafkaMusicExample.ALL_SONGS, QueryableStoreTypes.keyValueStore()); return songsStore.all().hasNext(); - } catch (Exception e) { + } catch (final Exception e) { e.printStackTrace(); return false; } @@ -285,7 +285,7 @@ private void verifyChart(final String url, 0); System.err.println(chart.size()); return chart.size() == 5; - } catch (Exception e) { + } catch (final Exception e) { e.printStackTrace(); return false; } diff --git a/src/test/java/io/confluent/examples/streams/kafka/EmbeddedSingleNodeKafkaCluster.java b/src/test/java/io/confluent/examples/streams/kafka/EmbeddedSingleNodeKafkaCluster.java index 8d9949e021..02a1a06fde 100644 --- a/src/test/java/io/confluent/examples/streams/kafka/EmbeddedSingleNodeKafkaCluster.java +++ b/src/test/java/io/confluent/examples/streams/kafka/EmbeddedSingleNodeKafkaCluster.java @@ -69,7 +69,7 @@ public EmbeddedSingleNodeKafkaCluster() { * * @param brokerConfig Additional broker configuration settings. 
*/ - public EmbeddedSingleNodeKafkaCluster(Properties brokerConfig) { + public EmbeddedSingleNodeKafkaCluster(final Properties brokerConfig) { this.brokerConfig = new Properties(); this.brokerConfig.put(SchemaRegistryConfig.KAFKASTORE_TIMEOUT_CONFIG, KAFKASTORE_OPERATION_TIMEOUT_MS); this.brokerConfig.putAll(brokerConfig); @@ -90,14 +90,14 @@ public void start() throws Exception { 30000, JaasUtils.isZkSecurityEnabled()); - Properties effectiveBrokerConfig = effectiveBrokerConfigFrom(brokerConfig, zookeeper); + final Properties effectiveBrokerConfig = effectiveBrokerConfigFrom(brokerConfig, zookeeper); log.debug("Starting a Kafka instance on port {} ...", effectiveBrokerConfig.getProperty(KafkaConfig$.MODULE$.PortProp())); broker = new KafkaEmbedded(effectiveBrokerConfig); log.debug("Kafka instance is running at {}, connected to ZooKeeper at {}", broker.brokerList(), broker.zookeeperConnect()); - Properties schemaRegistryProps = new Properties(); + final Properties schemaRegistryProps = new Properties(); schemaRegistryProps.put(SchemaRegistryConfig.KAFKASTORE_TIMEOUT_CONFIG, KAFKASTORE_OPERATION_TIMEOUT_MS); schemaRegistryProps.put(SchemaRegistryConfig.DEBUG_CONFIG, KAFKASTORE_DEBUG); @@ -108,8 +108,8 @@ public void start() throws Exception { running = true; } - private Properties effectiveBrokerConfigFrom(Properties brokerConfig, ZooKeeperEmbedded zookeeper) { - Properties effectiveConfig = new Properties(); + private Properties effectiveBrokerConfigFrom(final Properties brokerConfig, final ZooKeeperEmbedded zookeeper) { + final Properties effectiveConfig = new Properties(); effectiveConfig.putAll(brokerConfig); effectiveConfig.put(KafkaConfig$.MODULE$.ZkConnectProp(), zookeeper.connectString()); effectiveConfig.put(KafkaConfig$.MODULE$.PortProp(), DEFAULT_BROKER_PORT); @@ -142,7 +142,7 @@ public void stop() { if (schemaRegistry != null) { schemaRegistry.stop(); } - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException(e); } if (broker != null) { @@ -152,7 +152,7 @@ public void stop() { if (zookeeper != null) { zookeeper.stop(); } - } catch (IOException e) { + } catch (final IOException e) { throw new RuntimeException(e); } } finally { @@ -194,7 +194,7 @@ public String schemaRegistryUrl() { * * @param topic The name of the topic. */ - public void createTopic(String topic) { + public void createTopic(final String topic) { createTopic(topic, 1, 1, new Properties()); } @@ -205,7 +205,7 @@ public void createTopic(String topic) { * @param partitions The number of partitions for this topic. * @param replication The replication factor for (the partitions of) this topic. */ - public void createTopic(String topic, int partitions, int replication) { + public void createTopic(final String topic, final int partitions, final int replication) { createTopic(topic, partitions, replication, new Properties()); } @@ -217,10 +217,10 @@ public void createTopic(String topic, int partitions, int replication) { * @param replication The replication factor for (partitions of) this topic. * @param topicConfig Additional topic-level configuration settings. 
*/ - public void createTopic(String topic, - int partitions, - int replication, - Properties topicConfig) { + public void createTopic(final String topic, + final int partitions, + final int replication, + final Properties topicConfig) { broker.createTopic(topic, partitions, replication, topicConfig); } diff --git a/src/test/java/io/confluent/examples/streams/kafka/KafkaEmbedded.java b/src/test/java/io/confluent/examples/streams/kafka/KafkaEmbedded.java index 457d0e9a81..e243c7c80f 100644 --- a/src/test/java/io/confluent/examples/streams/kafka/KafkaEmbedded.java +++ b/src/test/java/io/confluent/examples/streams/kafka/KafkaEmbedded.java @@ -64,14 +64,14 @@ public class KafkaEmbedded { * broker should listen to. Note that you cannot change some settings such as * `log.dirs`, `port`. */ - public KafkaEmbedded(Properties config) throws IOException { + public KafkaEmbedded(final Properties config) throws IOException { tmpFolder = new TemporaryFolder(); tmpFolder.create(); logDir = tmpFolder.newFolder(); effectiveConfig = effectiveConfigFrom(config); - boolean loggingEnabled = true; + final boolean loggingEnabled = true; - KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, loggingEnabled); + final KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, loggingEnabled); log.debug("Starting embedded Kafka broker (with log.dirs={} and ZK ensemble at {}) ...", logDir, zookeeperConnect()); kafka = TestUtils.createServer(kafkaConfig, Time.SYSTEM); @@ -79,8 +79,8 @@ public KafkaEmbedded(Properties config) throws IOException { brokerList(), zookeeperConnect()); } - private Properties effectiveConfigFrom(Properties initialConfig) throws IOException { - Properties effectiveConfig = new Properties(); + private Properties effectiveConfigFrom(final Properties initialConfig) throws IOException { + final Properties effectiveConfig = new Properties(); effectiveConfig.put(KafkaConfig$.MODULE$.BrokerIdProp(), 0); effectiveConfig.put(KafkaConfig$.MODULE$.HostNameProp(), "127.0.0.1"); effectiveConfig.put(KafkaConfig$.MODULE$.PortProp(), "9092"); @@ -131,7 +131,7 @@ public void stop() { * * @param topic The name of the topic. */ - public void createTopic(String topic) { + public void createTopic(final String topic) { createTopic(topic, 1, 1, new Properties()); } @@ -142,7 +142,7 @@ public void createTopic(String topic) { * @param partitions The number of partitions for this topic. * @param replication The replication factor for (the partitions of) this topic. */ - public void createTopic(String topic, int partitions, int replication) { + public void createTopic(final String topic, final int partitions, final int replication) { createTopic(topic, partitions, replication, new Properties()); } @@ -154,23 +154,23 @@ public void createTopic(String topic, int partitions, int replication) { * @param replication The replication factor for (partitions of) this topic. * @param topicConfig Additional topic-level configuration settings. */ - public void createTopic(String topic, - int partitions, - int replication, - Properties topicConfig) { + public void createTopic(final String topic, + final int partitions, + final int replication, + final Properties topicConfig) { log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }", topic, partitions, replication, topicConfig); // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then // createTopic() will only seem to work (it will return without error). 
The topic will exist in // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the // topic. - ZkClient zkClient = new ZkClient( + final ZkClient zkClient = new ZkClient( zookeeperConnect(), DEFAULT_ZK_SESSION_TIMEOUT_MS, DEFAULT_ZK_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$); - boolean isSecure = false; - ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure); + final boolean isSecure = false; + final ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure); AdminUtils.createTopic(zkUtils, topic, partitions, replication, topicConfig, RackAwareMode.Enforced$.MODULE$); zkClient.close(); } @@ -180,15 +180,15 @@ public void createTopic(String topic, * * @param topic The name of the topic. */ - public void deleteTopic(String topic) { + public void deleteTopic(final String topic) { log.debug("Deleting topic {}", topic); - ZkClient zkClient = new ZkClient( + final ZkClient zkClient = new ZkClient( zookeeperConnect(), DEFAULT_ZK_SESSION_TIMEOUT_MS, DEFAULT_ZK_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$); - boolean isSecure = false; - ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure); + final boolean isSecure = false; + final ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure); AdminUtils.deleteTopic(zkUtils, topic); zkClient.close(); } diff --git a/src/test/java/io/confluent/examples/streams/microservices/EmailServiceTest.java b/src/test/java/io/confluent/examples/streams/microservices/EmailServiceTest.java index 9392bfbbb6..c41c181cd7 100644 --- a/src/test/java/io/confluent/examples/streams/microservices/EmailServiceTest.java +++ b/src/test/java/io/confluent/examples/streams/microservices/EmailServiceTest.java @@ -46,10 +46,11 @@ public void tearDown() { public void shouldSendEmailWithValidContents() throws Exception { //Given one order, customer and payment - String orderId = id(0L); - Order order = new Order(orderId, 15L, CREATED, UNDERPANTS, 3, 5.00d); - Customer customer = new Customer(15L, "Franz", "Kafka", "frans@thedarkside.net", "oppression street, prague, cze"); - Payment payment = new Payment("Payment:1234", orderId, "CZK", 1000.00d); + final String orderId = id(0L); + final Order order = new Order(orderId, 15L, CREATED, UNDERPANTS, 3, 5.00d); + final Customer customer = + new Customer(15L, "Franz", "Kafka", "frans@thedarkside.net", "oppression street, prague, cze"); + final Payment payment = new Payment("Payment:1234", orderId, "CZK", 1000.00d); emailService = new EmailService(details -> { assertThat(details.customer).isEqualTo(customer); diff --git a/src/test/java/io/confluent/examples/streams/microservices/EndToEndTest.java b/src/test/java/io/confluent/examples/streams/microservices/EndToEndTest.java index f1688e0dfa..3348d39a7d 100644 --- a/src/test/java/io/confluent/examples/streams/microservices/EndToEndTest.java +++ b/src/test/java/io/confluent/examples/streams/microservices/EndToEndTest.java @@ -36,7 +36,7 @@ public class EndToEndTest extends MicroserviceTestUtils { private static final Logger log = LoggerFactory.getLogger(EndToEndTest.class); private static final String HOST = "localhost"; - private List services = new ArrayList<>(); + private final List services = new ArrayList<>(); private OrderBean returnedBean; private long startTime; private Paths path; @@ -44,11 +44,11 @@ public class EndToEndTest extends MicroserviceTestUtils { @Test public void 
shouldCreateNewOrderAndGetBackValidatedOrder() {
-    OrderBean inputOrder = new OrderBean(id(1L), 2L, OrderState.CREATED, Product.JUMPERS, 1, 1d);
+    final OrderBean inputOrder = new OrderBean(id(1L), 2L, OrderState.CREATED, Product.JUMPERS, 1, 1d);
     client = getClient();
     //Add inventory required by the inventory service with enough items in stock to pass validation
-    List> inventory = asList(
+    final List> inventory = asList(
         new KeyValue<>(UNDERPANTS, 75),
         new KeyValue<>(JUMPERS, 10)
     );
@@ -56,7 +56,7 @@ public void shouldCreateNewOrderAndGetBackValidatedOrder() {
     //When we POST order and immediately GET on the returned location
     postWithRetries(client.target(path.urlPost()).request(APPLICATION_JSON_TYPE), Entity.json(inputOrder), 5);
-    Invocation.Builder builder = client
+    final Invocation.Builder builder = client
         .target(path.urlGetValidated(1))
         .queryParam("timeout", MIN)
         .request(APPLICATION_JSON_TYPE);
@@ -71,7 +71,7 @@ public void shouldProcessManyValidOrdersEndToEnd() {
     client = getClient();
     //Add inventory required by the inventory service
-    List> inventory = asList(
+    final List> inventory = asList(
         new KeyValue<>(UNDERPANTS, 75),
         new KeyValue<>(JUMPERS, 10)
     );
@@ -79,13 +79,13 @@ public void shouldProcessManyValidOrdersEndToEnd() {
     //Send ten orders in succession
     for (int i = 0; i < 10; i++) {
-      OrderBean inputOrder = new OrderBean(id(i), 2L, OrderState.CREATED, Product.JUMPERS, 1, 1d);
+      final OrderBean inputOrder = new OrderBean(id(i), 2L, OrderState.CREATED, Product.JUMPERS, 1, 1d);
       startTimer();
       //POST & GET order
       postWithRetries(client.target(path.urlPost()).request(APPLICATION_JSON_TYPE), Entity.json(inputOrder), 5);
-      Invocation.Builder builder = client
+      final Invocation.Builder builder = client
           .target(path.urlGetValidated(i))
           .queryParam("timeout", MIN)
           .request(APPLICATION_JSON_TYPE);
@@ -109,7 +109,7 @@ public void shouldProcessManyInvalidOrdersEndToEnd() {
     client = getClient();
     //Add inventory required by the inventory service
-    List> inventory = asList(
+    final List> inventory = asList(
         new KeyValue<>(UNDERPANTS, 75000),
         new KeyValue<>(JUMPERS, 0) //***nothing in stock***
     );
@@ -117,13 +117,13 @@ public void shouldProcessManyInvalidOrdersEndToEnd() {
     //Send ten orders one after the other
     for (int i = 0; i < 10; i++) {
-      OrderBean inputOrder = new OrderBean(id(i), 2L, OrderState.CREATED, Product.JUMPERS, 1, 1d);
+      final OrderBean inputOrder = new OrderBean(id(i), 2L, OrderState.CREATED, Product.JUMPERS, 1, 1d);
       startTimer();
       //POST & GET order
       postWithRetries(client.target(path.urlPost()).request(APPLICATION_JSON_TYPE), Entity.json(inputOrder), 5);
-      Invocation.Builder builder = client
+      final Invocation.Builder builder = client
          .target(path.urlGetValidated(i))
          .queryParam("timeout", MIN)
          .request(APPLICATION_JSON_TYPE);
diff --git a/src/test/java/io/confluent/examples/streams/microservices/FraudServiceTest.java b/src/test/java/io/confluent/examples/streams/microservices/FraudServiceTest.java
index 1d9bc7c301..b91fb3ec53 100644
--- a/src/test/java/io/confluent/examples/streams/microservices/FraudServiceTest.java
+++ b/src/test/java/io/confluent/examples/streams/microservices/FraudServiceTest.java
@@ -47,7 +47,7 @@ public void shouldValidateWhetherOrderAmountExceedsFraudLimitOverWindow() throws
     //Given
     fraudService = new FraudService();
-    List orders = asList(
+    final List orders = asList(
         new Order(id(0L), 0L, CREATED, UNDERPANTS, 3, 5.00d),
         new Order(id(1L), 0L, CREATED, JUMPERS, 1, 75.00d),
         new Order(id(2L), 1L, CREATED, JUMPERS, 1, 75.00d),
@@ -63,7 +63,7 @@ public void shouldValidateWhetherOrderAmountExceedsFraudLimitOverWindow() throws
     fraudService.start(CLUSTER.bootstrapServers());
     //Then there should be failures for the two orders that push customers over their limit.
-    List expected = asList(
+    final List expected = asList(
         new OrderValidation(id(0L), FRAUD_CHECK, PASS),
         new OrderValidation(id(1L), FRAUD_CHECK, PASS),
         new OrderValidation(id(2L), FRAUD_CHECK, PASS),
@@ -73,7 +73,7 @@ public void shouldValidateWhetherOrderAmountExceedsFraudLimitOverWindow() throws
         new OrderValidation(id(6L), FRAUD_CHECK, FAIL),
         new OrderValidation(id(7L), FRAUD_CHECK, FAIL)
     );
-    List read = read(Topics.ORDER_VALIDATIONS, 8, CLUSTER.bootstrapServers());
+    final List read = read(Topics.ORDER_VALIDATIONS, 8, CLUSTER.bootstrapServers());
     assertThat(read).isEqualTo(expected);
   }
 }
\ No newline at end of file
diff --git a/src/test/java/io/confluent/examples/streams/microservices/InventoryServiceTest.java b/src/test/java/io/confluent/examples/streams/microservices/InventoryServiceTest.java
index 4d595892fa..99562ddffd 100644
--- a/src/test/java/io/confluent/examples/streams/microservices/InventoryServiceTest.java
+++ b/src/test/java/io/confluent/examples/streams/microservices/InventoryServiceTest.java
@@ -79,14 +79,14 @@ public void shouldProcessOrdersWithSufficientStockAndRejectOrdersWithInsufficien
         .isEqualTo(expected);
     //And the reservations should have been incremented twice, once for each validated order
-    List> inventoryChangelog = readInventoryStateStore(2);
+    final List> inventoryChangelog = readInventoryStateStore(2);
     assertThat(inventoryChangelog).isEqualTo(asList(
         new KeyValue<>(UNDERPANTS.toString(), 3L),
         new KeyValue<>(JUMPERS.toString(), 1L)
     ));
   }
-  private List> readInventoryStateStore(int numberOfRecordsToWaitFor)
+  private List> readInventoryStateStore(final int numberOfRecordsToWaitFor)
       throws InterruptedException {
     return IntegrationTestUtils
         .waitUntilMinKeyValueRecordsReceived(inventoryConsumerProperties(CLUSTER),
@@ -94,8 +94,8 @@ private List> readInventoryStateStore(int numberOfRecord
             InventoryService.RESERVED_STOCK_STORE_NAME), numberOfRecordsToWaitFor);
   }
-  private static Properties inventoryConsumerProperties(EmbeddedSingleNodeKafkaCluster cluster) {
-    Properties consumerConfig = new Properties();
+  private static Properties inventoryConsumerProperties(final EmbeddedSingleNodeKafkaCluster cluster) {
+    final Properties consumerConfig = new Properties();
     consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers());
     consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "inventory-test-reader");
     consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
diff --git a/src/test/java/io/confluent/examples/streams/microservices/OrdersServiceTest.java b/src/test/java/io/confluent/examples/streams/microservices/OrdersServiceTest.java
index 6fdbc3b4af..6ac6faa499 100644
--- a/src/test/java/io/confluent/examples/streams/microservices/OrdersServiceTest.java
+++ b/src/test/java/io/confluent/examples/streams/microservices/OrdersServiceTest.java
@@ -61,17 +61,17 @@ public void prepareKafkaCluster() throws Exception {
   @Test
   public void shouldPostOrderAndGetItBack() {
-    OrderBean bean = new OrderBean(id(1L), 2L, OrderState.CREATED, Product.JUMPERS, 10, 100d);
+    final OrderBean bean = new OrderBean(id(1L), 2L, OrderState.CREATED, Product.JUMPERS, 10, 100d);
     final Client client = ClientBuilder.newClient();
     //Given a rest service
     rest = new OrdersService("localhost");
     rest.start(CLUSTER.bootstrapServers());
-    Paths paths = new Paths("localhost", rest.port());
+    final Paths paths = new Paths("localhost", rest.port());
     //When we POST an order
-    Response response = postWithRetries(
+    final Response response = postWithRetries(
         client.target(paths.urlPost()).request(APPLICATION_JSON_TYPE),
         Entity.json(bean),
         5);
@@ -103,15 +103,15 @@ public void shouldPostOrderAndGetItBack() {
   @Test
   public void shouldGetValidatedOrderOnRequest() {
-    Order orderV1 = new Order(id(1L), 3L, OrderState.CREATED, Product.JUMPERS, 10, 100d);
-    OrderBean beanV1 = OrderBean.toBean(orderV1);
+    final Order orderV1 = new Order(id(1L), 3L, OrderState.CREATED, Product.JUMPERS, 10, 100d);
+    final OrderBean beanV1 = OrderBean.toBean(orderV1);
     final Client client = ClientBuilder.newClient();
     //Given a rest service
     rest = new OrdersService("localhost");
     rest.start(CLUSTER.bootstrapServers());
-    Paths paths = new Paths("localhost", rest.port());
+    final Paths paths = new Paths("localhost", rest.port());
     //When we post an order
     postWithRetries(client.target(paths.urlPost()).request(APPLICATION_JSON_TYPE), Entity.json(beanV1), 5);
@@ -123,12 +123,12 @@ public void shouldGetValidatedOrderOnRequest() {
         .build()));
     //When we GET the order from the returned location
-    Invocation.Builder builder = client
+    final Invocation.Builder builder = client
         .target(paths.urlGetValidated(beanV1.getId()))
         .queryParam("timeout", MIN / 3)
         .request(APPLICATION_JSON_TYPE);
-    OrderBean returnedBean = getWithRetries(builder, newBean(), 5);
+    final OrderBean returnedBean = getWithRetries(builder, newBean(), 5);
     //Then status should be Validated
     assertThat(returnedBean.getState()).isEqualTo(OrderState.VALIDATED);
@@ -141,9 +141,9 @@ public void shouldTimeoutGetIfNoResponseIsFound() {
     //Start the rest interface
     rest = new OrdersService("localhost");
     rest.start(CLUSTER.bootstrapServers());
-    Paths paths = new Paths("localhost", rest.port());
+    final Paths paths = new Paths("localhost", rest.port());
-    Invocation.Builder builder = client
+    final Invocation.Builder builder = client
         .target(paths.urlGet(1))
         .queryParam("timeout", 100) //Lower the request timeout
         .request(APPLICATION_JSON_TYPE);
@@ -152,29 +152,29 @@ public void shouldTimeoutGetIfNoResponseIsFound() {
     try {
       getWithRetries(builder, newBean(), 0); // no retries to fail fast
       fail("Request should have failed as materialized view has not been updated");
-    } catch (ServerErrorException e) {
+    } catch (final ServerErrorException e) {
       assertThat(e.getMessage()).isEqualTo("HTTP 504 Gateway Timeout");
     }
   }
   @Test
   public void shouldGetOrderByIdWhenOnDifferentHost() {
-    OrderBean order = new OrderBean(id(1L), 4L, OrderState.VALIDATED, Product.JUMPERS, 10, 100d);
+    final OrderBean order = new OrderBean(id(1L), 4L, OrderState.VALIDATED, Product.JUMPERS, 10, 100d);
     final Client client = ClientBuilder.newClient();
     //Given two rest servers on different ports
     rest = new OrdersService("localhost");
     rest.start(CLUSTER.bootstrapServers());
-    Paths paths1 = new Paths("localhost", rest.port());
+    final Paths paths1 = new Paths("localhost", rest.port());
     rest2 = new OrdersService("localhost");
     rest2.start(CLUSTER.bootstrapServers());
-    Paths paths2 = new Paths("localhost", rest2.port());
+    final Paths paths2 = new Paths("localhost", rest2.port());
     //And one order
     postWithRetries(client.target(paths1.urlPost()).request(APPLICATION_JSON_TYPE), Entity.json(order), 5);
     //When GET to rest1
-    Invocation.Builder builder = client.target(paths1.urlGet(order.getId()))
+    final Invocation.Builder builder = client.target(paths1.urlGet(order.getId()))
         .queryParam("timeout", MIN / 3)
         .request(APPLICATION_JSON_TYPE);
diff --git a/src/test/java/io/confluent/examples/streams/microservices/ValidationsAggregatorServiceTest.java b/src/test/java/io/confluent/examples/streams/microservices/ValidationsAggregatorServiceTest.java
index 5300e01179..2ef97015f5 100644
--- a/src/test/java/io/confluent/examples/streams/microservices/ValidationsAggregatorServiceTest.java
+++ b/src/test/java/io/confluent/examples/streams/microservices/ValidationsAggregatorServiceTest.java
@@ -64,7 +64,7 @@ public void shouldAggregateRuleSuccesses() throws Exception {
     ordersService.start(CLUSTER.bootstrapServers());
     //Then
-    List> finalOrders = MicroserviceTestUtils
+    final List> finalOrders = MicroserviceTestUtils
         .readKeyValues(Topics.ORDERS, 4, CLUSTER.bootstrapServers());
     assertThat(finalOrders.size()).isEqualTo(4);
diff --git a/src/test/java/io/confluent/examples/streams/microservices/util/MicroserviceTestUtils.java b/src/test/java/io/confluent/examples/streams/microservices/util/MicroserviceTestUtils.java
index bea67998a4..9f498cd6f3 100644
--- a/src/test/java/io/confluent/examples/streams/microservices/util/MicroserviceTestUtils.java
+++ b/src/test/java/io/confluent/examples/streams/microservices/util/MicroserviceTestUtils.java
@@ -66,49 +66,49 @@ public static void stopCluster() {
     }
   }
-  protected static Properties producerConfig(EmbeddedSingleNodeKafkaCluster cluster) {
-    Properties producerConfig = new Properties();
+  protected static Properties producerConfig(final EmbeddedSingleNodeKafkaCluster cluster) {
+    final Properties producerConfig = new Properties();
     producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers());
     producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
     producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
     return producerConfig;
   }
-  public static List read(Schemas.Topic topic, int numberToRead,
-      String bootstrapServers) throws InterruptedException {
+  public static List read(final Schemas.Topic topic, final int numberToRead,
+      final String bootstrapServers) throws InterruptedException {
     return readKeyValues(topic, numberToRead, bootstrapServers).stream().map(kv -> kv.value)
         .collect(Collectors.toList());
   }
-  public static List readKeys(Schemas.Topic topic, int numberToRead,
-      String bootstrapServers) throws InterruptedException {
+  public static List readKeys(final Schemas.Topic topic, final int numberToRead,
+      final String bootstrapServers) throws InterruptedException {
     return readKeyValues(topic, numberToRead, bootstrapServers).stream().map(kv -> kv.key)
         .collect(Collectors.toList());
   }
-  public static List> readKeyValues(Schemas.Topic topic,
-      int numberToRead, String bootstrapServers) throws InterruptedException {
-    Deserializer keyDes = topic.keySerde().deserializer();
-    Deserializer valDes = topic.valueSerde().deserializer();
-    String topicName = topic.name();
+  public static List> readKeyValues(final Schemas.Topic topic,
+      final int numberToRead, final String bootstrapServers) throws InterruptedException {
+    final Deserializer keyDes = topic.keySerde().deserializer();
+    final Deserializer valDes = topic.valueSerde().deserializer();
+    final String topicName = topic.name();
     return readKeysAndValues(numberToRead, bootstrapServers, keyDes, valDes, topicName);
   }
-  private static List> readKeysAndValues(int numberToRead,
-      String bootstrapServers, Deserializer keyDes, Deserializer valDes, String topicName)
+  private static List> readKeysAndValues(final int numberToRead,
+      final String bootstrapServers, final Deserializer keyDes, final Deserializer valDes, final String topicName)
       throws InterruptedException {
-    Properties consumerConfig = new Properties();
+    final Properties consumerConfig = new Properties();
     consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
     consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "Test-Reader-" + consumerCounter++);
     consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
-    KafkaConsumer consumer = new KafkaConsumer<>(consumerConfig, keyDes, valDes);
+    final KafkaConsumer consumer = new KafkaConsumer<>(consumerConfig, keyDes, valDes);
     consumer.subscribe(singletonList(topicName));
-    List> actualValues = new ArrayList<>();
+    final List> actualValues = new ArrayList<>();
     TestUtils.waitForCondition(() -> {
-      ConsumerRecords records = consumer.poll(100);
-      for (ConsumerRecord record : records) {
+      final ConsumerRecords records = consumer.poll(100);
+      for (final ConsumerRecord record : records) {
        actualValues.add(KeyValue.pair(record.key(), record.value()));
      }
      return actualValues.size() == numberToRead;
@@ -117,9 +117,9 @@ private static List> readKeysAndValues(int numberToRead,
     return actualValues;
   }
-  private static void tailAllTopicsToConsole(Schemas.Topic topic,
-      String bootstrapServers) {
-    TopicTailer task = new TopicTailer<>(topic, bootstrapServers);
+  private static void tailAllTopicsToConsole(final Schemas.Topic topic,
+      final String bootstrapServers) {
+    final TopicTailer task = new TopicTailer<>(topic, bootstrapServers);
     tailers.add(task);
     Executors.newSingleThreadExecutor().execute(task);
   }
@@ -128,8 +128,8 @@ public static void stopTailers() {
     tailers.forEach(TopicTailer::stop);
   }
-  public static void tailAllTopicsToConsole(String bootstrapServers) {
-    for (Topic t : Topics.ALL.values()) {
+  public static void tailAllTopicsToConsole(final String bootstrapServers) {
+    for (final Topic t : Topics.ALL.values()) {
       tailAllTopicsToConsole(t, bootstrapServers);
     }
   }
@@ -142,7 +142,7 @@ static class TopicTailer implements Runnable {
     private Topic topic;
     private String bootstrapServers;
-    public TopicTailer(Schemas.Topic topic, String bootstrapServers) {
+    public TopicTailer(final Schemas.Topic topic, final String bootstrapServers) {
       this.topic = topic;
       this.bootstrapServers = bootstrapServers;
     }
@@ -150,18 +150,18 @@ public TopicTailer(Schemas.Topic topic, String bootstrapServers) {
     @Override
     public void run() {
       try {
-        Properties consumerConfig = new Properties();
+        final Properties consumerConfig = new Properties();
         consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
         consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "Test-Reader-" + consumerCounter++);
         consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
-        KafkaConsumer consumer = new KafkaConsumer<>(consumerConfig,
+        final KafkaConsumer consumer = new KafkaConsumer<>(consumerConfig,
             topic.keySerde().deserializer(), topic.valueSerde().deserializer());
         consumer.subscribe(singletonList(topic.name()));
         while (running) {
-          ConsumerRecords records = consumer.poll(100);
-          for (ConsumerRecord record : records) {
+          final ConsumerRecords records = consumer.poll(100);
+          for (final ConsumerRecord record : records) {
             log.info("Tailer[" + topic.name() + "-Offset:" + record.offset() + "]: " + record.key() + "->" + record.value());
           }
@@ -178,55 +178,55 @@ void stop() {
       try {
         Thread.sleep(200);
         log.info("Closing tailer...");
-      } catch (InterruptedException e) {
+      } catch (final InterruptedException e) {
         e.printStackTrace();
       }
     }
   }
 }
-  public static void send(Topic topic, KeyValue record) {
+  public static void send(final Topic topic, final KeyValue record) {
     send(topic, Collections.singletonList(record));
   }
-  public static void send(Topic topic, Collection> stuff) {
-    try (KafkaProducer producer = new KafkaProducer<>(
+  public static void send(final Topic topic, final Collection> stuff) {
+    try (final KafkaProducer producer = new KafkaProducer<>(
         producerConfig(CLUSTER), topic.keySerde().serializer(), topic.valueSerde().serializer())) {
-      for (KeyValue order : stuff) {
+      for (final KeyValue order : stuff) {
         producer.send(new ProducerRecord<>(topic.name(), order.key, order.value)).get();
       }
-    } catch (InterruptedException | ExecutionException e) {
+    } catch (final InterruptedException | ExecutionException e) {
       e.printStackTrace();
     }
   }
-  public static void sendOrders(List orders) {
-    List> collect = orders.stream().map(o -> new KeyValue<>(o.getId(), o))
+  public static void sendOrders(final List orders) {
+    final List> collect = orders.stream().map(o -> new KeyValue<>(o.getId(), o))
         .collect(Collectors.toList());
     send(Topics.ORDERS, collect);
   }
-  public static void sendOrderValuations(List orderValidations) {
-    List> collect = orderValidations.stream().map(o -> new KeyValue<>(o.getOrderId(), o))
+  public static void sendOrderValuations(final List orderValidations) {
+    final List> collect = orderValidations.stream().map(o -> new KeyValue<>(o.getOrderId(), o))
         .collect(Collectors.toList());
     send(Topics.ORDER_VALIDATIONS, collect);
   }
-  public static void sendInventory(List> inventory,
-      Schemas.Topic topic) {
-    try (KafkaProducer stockProducer = new KafkaProducer<>(
+  public static void sendInventory(final List> inventory,
+      final Schemas.Topic topic) {
+    try (final KafkaProducer stockProducer = new KafkaProducer<>(
         producerConfig(CLUSTER), topic.keySerde().serializer(), Schemas.Topics.WAREHOUSE_INVENTORY.valueSerde().serializer())) {
-      for (KeyValue kv : inventory) {
+      for (final KeyValue kv : inventory) {
         stockProducer.send(new ProducerRecord<>(Topics.WAREHOUSE_INVENTORY.name(), kv.key, kv.value))
             .get();
       }
-    } catch (InterruptedException | ExecutionException e) {
+    } catch (final InterruptedException | ExecutionException e) {
       e.printStackTrace();
     }
   }
@@ -237,7 +237,7 @@ public static T getWithRetries(final Invocation.Builder builder,
     while (true) {
       try {
         return builder.get(genericType);
-      } catch (ServerErrorException exception) {
+      } catch (final ServerErrorException exception) {
         if (exception.getMessage().contains("504") && numberOfRetries-- > 0) {
           continue;
         }
@@ -252,7 +252,7 @@ public static T getWithRetries(final Invocation.Builder builder,
     while (true) {
       try {
         return builder.get(clazz);
-      } catch (ServerErrorException exception) {
+      } catch (final ServerErrorException exception) {
         if (exception.getMessage().contains("504") && numberOfRetries-- > 0) {
           continue;
         }
@@ -267,7 +267,7 @@ public static Response postWithRetries(final Invocation.Builder builder,
     while (true) {
       try {
         return builder.post(entity);
-      } catch (ServerErrorException exception) {
+      } catch (final ServerErrorException exception) {
         if (exception.getMessage().contains("504") && numberOfRetries-- > 0) {
           continue;
         }