diff --git a/build.gradle b/build.gradle
index aabc1aa3..78c4b01f 100644
--- a/build.gradle
+++ b/build.gradle
@@ -82,7 +82,7 @@ task archive(type: Tar) {
baseName = "com.ibm.streamsx.kafka"
version = toolkitVersion
doLast {
- ant.checksum file: archivePath
+ ant.checksum file: archivePath, algorithm: 'SHA-1', fileext: '.sha1'
}
}
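
For reference: with these arguments, Ant's checksum task should write the SHA-1 digest of the archive to a sibling file with the '.sha1' suffix instead of the default MD5 output. Assuming the Tar task settings shown above (archive name, extension, and digest are illustrative), the distribution directory would end up roughly like:

    com.ibm.streamsx.kafka-<toolkitVersion>.tgz
    com.ibm.streamsx.kafka-<toolkitVersion>.tgz.sha1    (contains the hex digest, e.g. "2fd4e1c6...")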
diff --git a/com.ibm.streamsx.kafka/.classpath b/com.ibm.streamsx.kafka/.classpath
index fe5f2f2c..b6fe735c 100644
--- a/com.ibm.streamsx.kafka/.classpath
+++ b/com.ibm.streamsx.kafka/.classpath
diff --git a/com.ibm.streamsx.kafka/.project b/com.ibm.streamsx.kafka/.project
index 7c1646fe..62016543 100644
--- a/com.ibm.streamsx.kafka/.project
+++ b/com.ibm.streamsx.kafka/.project
@@ -2,26 +2,33 @@
 	<name>com.ibm.streamsx.kafka</name>
-	<natures>
-		<nature>org.eclipse.xtext.ui.shared.xtextNature</nature>
-		<nature>com.ibm.streams.studio.splproject.SPLProjectNature</nature>
-		<nature>org.eclipse.jdt.core.javanature</nature>
-	</natures>
 	<buildSpec>
 		<buildCommand>
 			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
 		</buildCommand>
 		<buildCommand>
 			<name>org.eclipse.xtext.ui.shared.xtextBuilder</name>
+			<arguments>
+			</arguments>
 		</buildCommand>
 		<buildCommand>
-			<name>com.ibm.streams.studio.splproject.builder.SPLProjectBuilder</name>
+			<name>org.eclipse.ui.externaltools.ExternalToolBuilder</name>
+			<triggers>full,incremental,</triggers>
+			<arguments>
+				<dictionary>
+					<key>LaunchConfigHandle</key>
+					<value>&lt;project&gt;/.externalToolBuilders/com.ibm.streams.studio.splproject.builder.SPLProjectBuilder.launch</value>
+				</dictionary>
+			</arguments>
 		</buildCommand>
 	</buildSpec>
+	<natures>
+		<nature>org.eclipse.xtext.ui.shared.xtextNature</nature>
+		<nature>com.ibm.streams.studio.splproject.SPLProjectNature</nature>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
diff --git a/com.ibm.streamsx.kafka/.settings/org.eclipse.jdt.core.prefs b/com.ibm.streamsx.kafka/.settings/org.eclipse.jdt.core.prefs
index 907d8186..bf49d08b 100644
--- a/com.ibm.streamsx.kafka/.settings/org.eclipse.jdt.core.prefs
+++ b/com.ibm.streamsx.kafka/.settings/org.eclipse.jdt.core.prefs
@@ -1,9 +1,9 @@
#
-#Thu Sep 14 06:32:00 EDT 2017
+#Fri Aug 09 10:08:10 CEST 2019
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
eclipse.preferences.version=1
-org.eclipse.jdt.core.compiler.source=1.8
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.8
org.eclipse.jdt.core.compiler.processAnnotations=enabled
org.eclipse.jdt.core.compiler.compliance=1.8
diff --git a/com.ibm.streamsx.kafka/build.gradle b/com.ibm.streamsx.kafka/build.gradle
index c91ae334..4987c391 100644
--- a/com.ibm.streamsx.kafka/build.gradle
+++ b/com.ibm.streamsx.kafka/build.gradle
@@ -45,8 +45,8 @@ def STREAMS_INSTALL = System.getenv("STREAMS_INSTALL")
dependencies {
compile fileTree(dir: STREAMS_INSTALL + '/lib', include: ['com.ibm.streams.operator.jar'])
-// compile group: 'org.apache.kafka', name: 'kafka-clients', version: '1.0.0'
- compile group: 'org.apache.kafka', name: 'kafka-clients', version: '2.2.1'
+// compile group: 'org.apache.kafka', name: 'kafka-clients', version: '2.2.1'
+ compile group: 'org.apache.kafka', name: 'kafka-clients', version: '2.3.1'
compile group: 'org.apache.commons', name: 'commons-lang3', version: '3.5'
compile group: 'com.google.code.gson', name: 'gson', version: '2.8.1'
compile group: 'org.slf4j', name: 'slf4j-log4j12', version: '1.7.21'
@@ -68,6 +68,7 @@ task deleteDeps(type: Delete) {
delete "opt"
delete "bin"
delete "output"
+ delete "impl/java/bin"
delete fileTree(dir : 'com.ibm.streamsx.kafka', exclude : ['*.spl'])
delete "com.ibm.streamsx.kafka.messagehub"
delete fileTree(dir : 'impl/lib/', include : ['com.ibm.streamsx.kafka*.jar'])
diff --git a/com.ibm.streamsx.kafka/com.ibm.streamsx.kafka/Functions.spl b/com.ibm.streamsx.kafka/com.ibm.streamsx.kafka/Functions.spl
index 1d0c50e4..a2cfdb0e 100644
--- a/com.ibm.streamsx.kafka/com.ibm.streamsx.kafka/Functions.spl
+++ b/com.ibm.streamsx.kafka/com.ibm.streamsx.kafka/Functions.spl
@@ -19,9 +19,92 @@ rstring _createTopicPartitionOffsetObject (rstring topic, int32 partition) {
";
}
+rstring _createTopicObject (rstring topic) {
+ return "
+ {
+ \"topic\" : \"" + topic + "\"
+ }
+ ";
+}
+
+/**
+ * Creates the JSON message to add a single topic to the operator's subscription to begin consuming from the default position.
+ *
+ * @param topic The topic to subscribe
+ *
+ * @return A JSON string to be submitted to the KafkaConsumer input port
+ */
+public rstring createMessageAddTopic (rstring topic) {
+ return createMessageAddTopics ([topic]);
+}
+
+/**
+ * Creates the JSON message to add multiple topics to the operator's subscription to begin consuming from the default position.
+ *
+ * @param topics The topics to subscribe
+ *
+ * @return A JSON string to be submitted to the KafkaConsumer input port
+ */
+public rstring createMessageAddTopics (list<rstring> topics) {
+
+ int32 listSize = size(topics);
+ mutable rstring toAdd = "";
+ for(int32 i in range(0, listSize)) {
+ toAdd += _createTopicObject (topics[i]);
+
+ if(i+1 < listSize)
+ toAdd += ",";
+ }
+
+ return "{
+ \"action\" : \"ADD\",
+ \"topics\" : [" +
+ toAdd +
+ "]
+ }";
+}
+
+/**
+ * Creates the JSON message to remove a single topic from the operator's subscription.
+ *
+ * @param topic The topic to unsubscribe
+ *
+ * @return A JSON string to be submitted to the KafkaConsumer input port
+ */
+public rstring createMessageRemoveTopic (rstring topic) {
+ return createMessageRemoveTopics ([topic]);
+}
+
+/**
+ * Creates the JSON message to remove multiple topics from the operator's subscription.
+ *
+ * @param topics The topics to unsubscribe
+ *
+ * @return A JSON string to be submitted to the KafkaConsumer input port
+ */
+public rstring createMessageRemoveTopics (list<rstring> topics) {
+
+ int32 listSize = size(topics);
+ mutable rstring toRemove = "";
+ for(int32 i in range(0, listSize)) {
+ toRemove += _createTopicObject (topics[i]);
+
+ if (i+1 < listSize)
+ toRemove += ",";
+ }
+
+ return "{
+ \"action\" : \"REMOVE\",
+ \"topics\" : [" +
+ toRemove +
+ "]
+ }";
+}
+
+
/**
*
- * Creates the JSON message to remove multiple topic-partitions from the operator.
+ * Creates the JSON message to remove multiple topic partitions from the operator's partition assignment.
*
* @param topicPartitionsToRemove specifies a list of topic partitions to remove
*
@@ -46,14 +129,14 @@ public rstring createMessageRemoveTopicPartition (list<tuple<rstring topic, int32 partition>> topicPartitionsToRemove) {
}
/**
- * Creates the JSON message to add multiple topic-partitions to the operator.
- * The operator will begin consuming from the topic-partitions at their specified offset.
+ * Creates the JSON message to add multiple topic partitions to the operator's partition assignment.
+ * The operator will begin consuming from the topic partitions at their specified offset.
*
- * * To begin consuming from the **end** of a topic-partition, set the offset value to `-1`
- * * To begin consuming from the **beginning** of a topic-partition, set the offset value to `-2`
- * * To begin consuming from the **default** position, set the offset value to `-3`
+ * * To begin consuming from the **end** of a topic partition, set the offset value to `-1`
+ * * To begin consuming from the **beginning** of a topic partition, set the offset value to `-2`
+ * * To begin consuming from the **default** position, set the offset value to `-3`, which is effectively equivalent to `rstring createMessageAddTopicPartition (list<tuple<rstring topic, int32 partition>> topicPartitionsToAdd)`
*
- * @param topicPartitionsToAdd A list of topic-partitions to subscribe to along with the corresponding offset number to begin consuming from.
+ * @param topicPartitionsToAdd A list of topic partitions to assign, along with the corresponding offset number to begin consuming from.
*
* @return A JSON string to be submitted to the KafkaConsumer input port
*/
@@ -76,10 +159,10 @@ public rstring createMessageAddTopicPartition (list<tuple<rstring topic, int32 partition, int64 offset>> topicPartitionsToAdd) {
/**
- * Creates the JSON message to remove a single topic-partition from the operator.
+ * Creates the JSON message to remove a single topic partition from the operator's partition assignment.
*
- * @param topic The topic to unsubscribe from
- * @param partition The partition to unsubscribe from
+ * @param topic The topic to which the partition belongs
+ * @param partition The partition number of the topic to unassign
*
* @return A JSON string to be submitted to the KafkaConsumer input port
*/
@@ -115,15 +198,16 @@ public rstring createMessageRemoveTopicPartition (rstring topic, int32 partition
}
/**
- * Creates the JSON message to add a single topic-partition to the operator and to begin consuming at the specified offset.
+ * Creates the JSON message to add a single topic partition to the operator's partition assignment
+ * and to begin consuming at the specified offset.
*
- * * To begin consuming from the **end** of a topic-partition, set the offset value to `-1`
- * * To begin consuming from the **beginning** of a topic-partition, set the offset value to `-2`
- * * To begin consuming from the **default** position, set the offset value to `-3`
+ * * To begin consuming from the **end** of a topic partition, set the offset value to `-1`
+ * * To begin consuming from the **beginning** of a topic partition, set the offset value to `-2`
+ * * To begin consuming from the **default** position, set the offset value to `-3`, which is effectively equivalent to `rstring createMessageAddTopicPartition (rstring topic, int32 partition)`
*
- * @param topic The topic to subscribe to
- * @param partition The partition number to assign to
- * @param offset The offset of the topic-partition to begin consuming from
+ * @param topic The topic to which the partition belongs
+ * @param partition The partition number of the topic to assign
+ * @param offset The offset of the topic partition to begin consuming from
*
* @return A JSON string to be submitted to the KafkaConsumer input port
*/
@@ -132,10 +216,11 @@ public rstring createMessageAddTopicPartition (rstring topic, int32 partition, int64 offset) {
}
/**
- * Creates the JSON message to add a single topic-partition to the operator and to begin consuming at the default position.
+ * Creates the JSON message to add a single topic partition to the operator's partition assignment and
+ * to begin consuming at the default position.
*
- * @param topic The topic to subscribe to
- * @param partition The partition number to assign to
+ * @param topic The topic to which the partition belongs
+ * @param partition The partition number of the topic to assign
*
* @return A JSON string to be submitted to the KafkaConsumer input port
*/
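
Whitespace aside, the new topic-subscription helpers emit messages like the following; for example, createMessageAddTopics (["t1", "t2"]) (topic names illustrative) returns:

    {
      "action" : "ADD",
      "topics" : [
        { "topic" : "t1" },
        { "topic" : "t2" }
      ]
    }

createMessageRemoveTopics builds the same structure with "action" : "REMOVE".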
diff --git a/com.ibm.streamsx.kafka/com.ibm.streamsx.kafka/namespace-info.spl b/com.ibm.streamsx.kafka/com.ibm.streamsx.kafka/namespace-info.spl
index ee7bff97..6c277350 100644
--- a/com.ibm.streamsx.kafka/com.ibm.streamsx.kafka/namespace-info.spl
+++ b/com.ibm.streamsx.kafka/com.ibm.streamsx.kafka/namespace-info.spl
@@ -2,33 +2,35 @@
* This SPL namespace contains all functions and operators that are required to integrate with Kafka message brokers.
*
* + Compatibility
- * This toolkit is compatible with Kafka brokers version 0.10.2 and later including 2.3.x. This toolkit contains the Kafka client library version 2.2.1.
- * Note, that Kafka transactions cannot be used when you connect to a Kafka broker that is below version 0.11, and that Zstandard
- * compression is incompatible with brokers below version 2.1.
+ * This toolkit is compatible with Kafka brokers version 0.10.2 and later including 2.3.x.
+ * This toolkit contains the Kafka client library version 2.3.1.
+ *
+ * * To use Kafka transactions with the `KafkaProducer`, the Kafka server must be at version 0.11 or higher.
+ * * To use Zstandard compression with the `KafkaProducer`, the Kafka server must be at version 2.1 or higher.
+ * * To use the `guaranteeOrdering: true;` parameter of the `KafkaProducer`, the Kafka server must be at version 0.11 or higher.
+ * * To configure the `KafkaConsumer` operator as a static consumer group member, the Kafka server must be at version 2.3 or higher.
*
* + Configuration
*
* The operators use Kafka's Producer or Consumer API. As a consequence, you can specify all properties for the APIs to control
- * the behavior of the operators. The producer configuration for Apache Kafka 2.2 can be found in the
- * [https://kafka.apache.org/22/documentation/#producerconfigs|producer configs section of the Apache Kafka 2.2 documentation],
+ * the behavior of the operators. The producer configuration for Apache Kafka 2.3 can be found in the
+ * [https://kafka.apache.org/documentation/#producerconfigs|producer configs section of the Apache Kafka 2.3 documentation],
* the configs for the Apache Kafka Consumer can be found in the
- * [https://kafka.apache.org/22/documentation/#consumerconfigs|consumer configs section of the Apache Kafka 2.2 documentation].
+ * [https://kafka.apache.org/documentation/#consumerconfigs|consumer configs section of the Apache Kafka 2.3 documentation].
*
* Properties can be specified in different ways:
- * * As an Application configuration
- *
- * You must specify the name of the App Config in the **appConfigName** parameter.
- * * in a property file
- *
- * You must specify the filename in the **propertiesFile** parameter.
- * * operator parameters **clientId** and **groupId** (KafkaConsumer operator only)
+ *
+ * * as an Application configuration - you must specify the name of the App Config in the **appConfigName** parameter
+ * * in a property file - you must specify the filename in the **propertiesFile** parameter
+ * * operator parameters **clientId** and **groupId** (groupId for the KafkaConsumer operator only)
*
* Kafka properties can be specified by using these mechanisms simultaneously. The precedence is
+ *
* * parameter
* * property file
- * * application configuration. Please note that an application configuration with instance scope takes precedence over a
- * configuration with same name at domain level.
- *
+ * * application configuration. For Streams version 4.x users: Please note that an application configuration with
+ * instance scope takes precedence over a configuration with the same name at domain level.
+ *
*/
namespace com.ibm.streamsx.kafka;
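
As a minimal sketch of the propertiesFile mechanism described above (broker addresses and the group name are illustrative, not defaults), such a file could contain:

    bootstrap.servers=kafka-1:9092,kafka-2:9092,kafka-3:9092
    group.id=myConsumerGroup
    isolation.level=read_committed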
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/TopicPartitionUpdateParseException.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/ControlportJsonParseException.java
similarity index 76%
rename from com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/TopicPartitionUpdateParseException.java
rename to com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/ControlportJsonParseException.java
index 536fa8da..0f4d8e3c 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/TopicPartitionUpdateParseException.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/ControlportJsonParseException.java
@@ -16,7 +16,7 @@
/**
* @author IBM Kafka toolkit maintainers
*/
-public class TopicPartitionUpdateParseException extends KafkaOperatorRuntimeException {
+public class ControlportJsonParseException extends KafkaOperatorRuntimeException {
private static final long serialVersionUID = 1L;
@@ -39,21 +39,21 @@ public void setJson (String json) {
/**
*
*/
- public TopicPartitionUpdateParseException() {
+ public ControlportJsonParseException() {
super();
}
/**
* @param message
*/
- public TopicPartitionUpdateParseException (String message) {
+ public ControlportJsonParseException (String message) {
super (message);
}
/**
* @param cause
*/
- public TopicPartitionUpdateParseException(Throwable cause) {
+ public ControlportJsonParseException(Throwable cause) {
super(cause);
}
@@ -61,7 +61,7 @@ public TopicPartitionUpdateParseException(Throwable cause) {
* @param message
* @param cause
*/
- public TopicPartitionUpdateParseException(String message, Throwable cause) {
+ public ControlportJsonParseException(String message, Throwable cause) {
super(message, cause);
}
@@ -71,7 +71,7 @@ public TopicPartitionUpdateParseException(String message, Throwable cause) {
* @param enableSuppression
* @param writableStackTrace
*/
- public TopicPartitionUpdateParseException(String message, Throwable cause, boolean enableSuppression,
+ public ControlportJsonParseException(String message, Throwable cause, boolean enableSuppression,
boolean writableStackTrace) {
super(message, cause, enableSuppression, writableStackTrace);
}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/Features.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/Features.java
index aca0dc7c..91525c7e 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/Features.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/Features.java
@@ -18,16 +18,8 @@
*/
public class Features {
/**
- * When set to true, consumer groups outside a consistent region with startPosition != Default are enabled.
- * When set to false, group management is automatically disabled when startPosition != Default and not in a CR.
- * This feature requires a JobControlPlane.
+ * When set to true, group management is also enabled when no group-ID is given, i.e. with a generated group-ID.
+ * When set to false, group management is disabled unless the user specifies a group identifier.
*/
- public static boolean ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION = !SystemProperties.isLegacyBehavior();
-
- /**
- * When set to true, the consumer does not seek to initial startPosition when not in consistent region.
- * When false, the consumer seeks to what startPosition is after every restart.
- * This feature requires a JobControlPlane.
- */
- public static boolean ENABLE_NOCR_NO_CONSUMER_SEEK_AFTER_RESTART = !SystemProperties.isLegacyBehavior();
+ public static boolean ENABLE_GROUP_MANAGEMENT_NO_USER_GROUP_ID = !SystemProperties.isLegacyBehavior();
}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorCheckpointException.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorCheckpointException.java
new file mode 100644
index 00000000..3d52ee54
--- /dev/null
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorCheckpointException.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.ibm.streamsx.kafka;
+
+/**
+ * RuntimeException that is thrown when a checkpoint in a Consistent Region fails.
+ * @author IBM Kafka toolkit team
+ */
+public class KafkaOperatorCheckpointException extends KafkaOperatorRuntimeException {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Constructs a new KafkaOperatorCheckpointException
+ */
+ public KafkaOperatorCheckpointException() {
+ super();
+ }
+
+ /**
+ * Constructs a new KafkaOperatorCheckpointException
+ * @param message the exception message
+ */
+ public KafkaOperatorCheckpointException (String message) {
+ super (message);
+ }
+
+ /**
+ * Constructs a new KafkaOperatorCheckpointException
+ * @param cause the cause of the exception
+ */
+ public KafkaOperatorCheckpointException (Throwable cause) {
+ super (cause);
+ }
+
+ /**
+ * Constructs a new KafkaOperatorCheckpointException
+ * @param message the exception message
+ * @param cause the cause of the exception
+ */
+ public KafkaOperatorCheckpointException (String message, Throwable cause) {
+ super (message, cause);
+ }
+
+ /**
+ * Constructs a new KafkaOperatorCheckpointException
+ * @param message the exception message
+ * @param cause the cause of the exception
+ * @param enableSuppression
+ * @param writableStackTrace
+ */
+ public KafkaOperatorCheckpointException (String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
+ super (message, cause, enableSuppression, writableStackTrace);
+ }
+
+}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorException.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorException.java
index 0c3a56e5..be44858e 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorException.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorException.java
@@ -24,20 +24,21 @@ public class KafkaOperatorException extends Exception {
* Constructs a new KafkaOperatorException
*/
public KafkaOperatorException() {
+ this ("", null);
}
/**
* @param message
*/
public KafkaOperatorException (String message) {
- super(message);
+ this (message, null);
}
/**
* @param rootCause
*/
public KafkaOperatorException (Throwable rootCause) {
- super(rootCause);
+ this ("", rootCause);
}
/**
@@ -45,9 +46,9 @@ public KafkaOperatorException (Throwable rootCause) {
* @param rootCause
*/
public KafkaOperatorException (String message, Throwable rootCause) {
- super(message, rootCause);
+ super (message, rootCause);
}
-
+
/**
* gets the root cause of the exception
*
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorNotRegisteredException.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorNotRegisteredException.java
new file mode 100644
index 00000000..5c7cd644
--- /dev/null
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorNotRegisteredException.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.ibm.streamsx.kafka;
+
+/**
+ * This exception indicates that an operator is not registered in the MXBean
+ * @author IBM Kafka toolkit maintainers
+ */
+public class KafkaOperatorNotRegisteredException extends KafkaOperatorCheckpointException {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Constructs a new KafkaOperatorNotRegisteredException
+ */
+ public KafkaOperatorNotRegisteredException() {
+ super();
+ }
+
+ /**
+ * Constructs a new KafkaOperatorNotRegisteredException
+ * @param message - the detail message.
+ */
+ public KafkaOperatorNotRegisteredException(String message) {
+ super(message);
+ }
+
+ /**
+ * Constructs a new KafkaOperatorNotRegisteredException
+ * @param cause - the cause. (A null value is permitted, and indicates that the cause is nonexistent or unknown.)
+ */
+ public KafkaOperatorNotRegisteredException(Throwable cause) {
+ super(cause);
+ }
+
+ /**
+ * Constructs a new KafkaOperatorNotRegisteredException
+ * @param message - the detail message.
+ * @param cause - the cause. (A null value is permitted, and indicates that the cause is nonexistent or unknown.)
+ */
+ public KafkaOperatorNotRegisteredException(String message, Throwable cause) {
+ super (message, cause);
+ }
+
+ /**
+ * Constructs a new KafkaOperatorNotRegisteredException
+ * @param message - the detail message.
+ * @param cause - the cause. (A null value is permitted, and indicates that the cause is nonexistent or unknown.)
+ * @param enableSuppression
+ * @param writableStackTrace
+ */
+ public KafkaOperatorNotRegisteredException(String message, Throwable cause, boolean enableSuppression,
+ boolean writableStackTrace) {
+ super (message, cause, enableSuppression, writableStackTrace);
+ }
+}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorRuntimeException.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorRuntimeException.java
index 74b8bbc8..ffb4b074 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorRuntimeException.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/KafkaOperatorRuntimeException.java
@@ -25,21 +25,21 @@ public class KafkaOperatorRuntimeException extends RuntimeException {
*
*/
public KafkaOperatorRuntimeException() {
- super ();
+ this ("", null);
}
/**
* @param message
*/
public KafkaOperatorRuntimeException(String message) {
- super(message);
+ super(message, null);
}
/**
* @param cause
*/
public KafkaOperatorRuntimeException(Throwable cause) {
- super(cause);
+ this("", cause);
}
/**
@@ -47,7 +47,7 @@ public KafkaOperatorRuntimeException(Throwable cause) {
* @param cause
*/
public KafkaOperatorRuntimeException(String message, Throwable cause) {
- super(message, cause);
+ this(message, cause, false, true);
}
/**
@@ -59,4 +59,19 @@ public KafkaOperatorRuntimeException(String message, Throwable cause) {
public KafkaOperatorRuntimeException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
super(message, cause, enableSuppression, writableStackTrace);
}
+
+ /**
+ * gets the root cause of the exception
+ *
+ * @return the root cause or `null` if there is none.
+ */
+ public Throwable getRootCause() {
+ Throwable rootCause = null;
+ Throwable cause = getCause();
+ while (cause != null) {
+ rootCause = cause;
+ cause = cause.getCause();
+ }
+ return rootCause;
+ }
}
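
A minimal sketch of how the added getRootCause() walks the cause chain (the exception nesting and messages are illustrative):

    import com.ibm.streamsx.kafka.KafkaOperatorRuntimeException;

    public class RootCauseDemo {
        public static void main (String[] args) {
            // cause chain: KafkaOperatorRuntimeException <- IllegalStateException <- IOException
            final Exception root = new java.io.IOException ("Broken pipe");
            final Exception middle = new IllegalStateException ("consumer failed", root);
            final KafkaOperatorRuntimeException e = new KafkaOperatorRuntimeException ("poll loop terminated", middle);
            // getRootCause() follows getCause() down to the innermost throwable
            System.out.println (e.getRootCause().getMessage());    // prints "Broken pipe"
        }
    }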
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/AbstractKafkaClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/AbstractKafkaClient.java
index 1040bc40..31ac843e 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/AbstractKafkaClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/AbstractKafkaClient.java
@@ -32,6 +32,8 @@
import com.ibm.streams.operator.OperatorContext;
import com.ibm.streams.operator.control.ControlPlaneContext;
+import com.ibm.streams.operator.metrics.Metric;
+import com.ibm.streams.operator.metrics.OperatorMetrics;
import com.ibm.streams.operator.types.Blob;
import com.ibm.streams.operator.types.RString;
import com.ibm.streamsx.kafka.KafkaConfigurationException;
@@ -77,7 +79,7 @@ public abstract class AbstractKafkaClient {
public AbstractKafkaClient (OperatorContext operatorContext, KafkaOperatorProperties kafkaProperties, boolean isConsumer) {
this.operatorContext = operatorContext;
- logger.info ("instantiating client: " + getThisClassName());
+ logger.info ("instantiating client: " + getThisClassName() + " (magic " + getImplementationMagic() + ")");
this.jcpContext = operatorContext.getOptionalContext (ControlPlaneContext.class);
// Create a unique client ID for the consumer if one is not specified or add the UDP channel when specified and in UDP
// This is important, otherwise running multiple consumers from the same
@@ -115,6 +117,13 @@ public AbstractKafkaClient (OperatorContext operatorContext, KafkaOperatorProper
}
}
+ /**
+ * Returns the implementation magic number.
+ * @return a hash number of the implementation of the runtime class name: getThisClassName().hashCode()
+ */
+ public int getImplementationMagic() {
+ return getThisClassName().hashCode();
+ }
/**
* returns the operator context.
@@ -161,6 +170,22 @@ public String getThisClassName() {
}
+ /**
+ * Tests existence of a custom metric and creates the metric if it does not yet exist.
+ * @param name the name of the metric
+ * @param descr the description
+ * @param kind the kind of the metric
+ * @return the Metric object.
+ */
+ protected Metric tryCreateCustomMetric (final String name, final String descr, final Metric.Kind kind) {
+ OperatorMetrics metrics = getOperatorContext().getMetrics();
+ Metric m = metrics.getCustomMetrics().get (name.trim());
+ if (m != null)
+ return m;
+ return metrics.createCustomMetric (name, descr, kind);
+ }
+
+
public static String getSerializer(Class<?> clazz) throws KafkaConfigurationException {
if (clazz == null) throw new KafkaConfigurationException ("Unable to find serializer for 'null'");
if (clazz.equals(String.class) || clazz.equals(RString.class)) {
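
A usage sketch for the new helper from within a client subclass (the metric shown is the nFailedControlTuples counter introduced further below; calling the method repeatedly, e.g. after an operator restart, is safe because an existing metric is returned rather than re-created):

    // inside a subclass of AbstractKafkaClient
    Metric nFailedControlTuples = tryCreateCustomMetric ("nFailedControlTuples",
            "Number of failed control tuples", Metric.Kind.COUNTER);
    nFailedControlTuples.increment();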
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/AbstractKafkaConsumerClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/AbstractKafkaConsumerClient.java
index 0347d3c1..de5e08f4 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/AbstractKafkaConsumerClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/AbstractKafkaConsumerClient.java
@@ -116,6 +116,8 @@ public abstract class AbstractKafkaConsumerClient extends AbstractKafkaClient im
private final Metric nQueueFullPause;
private final Metric nConsumedTopics;
protected final Metric nAssignedPartitions;
+ protected final Metric nFailedControlTuples;
+ protected final Metric isGroupManagementActive;
// Lock/condition for when we pause processing due to
// no space on the queue or low memory.
@@ -143,10 +145,15 @@ protected AbstractKafkaConsumerClient (final OperatorContext operatorCont
super (operatorContext, kafkaProperties, true);
this.kafkaProperties = kafkaProperties;
+
if (!kafkaProperties.containsKey(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG)) {
this.kafkaProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, getDeserializer(keyClass));
}
+ if (!kafkaProperties.containsKey(ConsumerConfig.ISOLATION_LEVEL_CONFIG)) {
+ this.kafkaProperties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
+ }
+
if (!kafkaProperties.containsKey(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)) {
this.kafkaProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, getDeserializer(valueClass));
}
@@ -157,9 +164,8 @@ protected AbstractKafkaConsumerClient (final OperatorContext operatorCont
this.groupId = kafkaProperties.getProperty (ConsumerConfig.GROUP_ID_CONFIG);
}
else {
- ProcessingElement pe = operatorContext.getPE();
- this.groupId = "D" + pe.getDomainId().hashCode() + pe.getInstanceId().hashCode()
- + pe.getJobId() + operatorContext.getName().hashCode();
+ this.groupId = generateGroupId (operatorContext);
+ logger.info ("Generated group.id: " + this.groupId);
this.kafkaProperties.put(ConsumerConfig.GROUP_ID_CONFIG, this.groupId);
this.groupIdGenerated = true;
}
@@ -203,7 +209,25 @@ protected AbstractKafkaConsumerClient (final OperatorContext operatorCont
this.nLowMemoryPause = operatorContext.getMetrics().getCustomMetric ("nLowMemoryPause");
this.nQueueFullPause = operatorContext.getMetrics().getCustomMetric ("nQueueFullPause");
this.nAssignedPartitions = operatorContext.getMetrics().getCustomMetric ("nAssignedPartitions");
+ this.nFailedControlTuples = operatorContext.getMetrics().getCustomMetric ("nFailedControlTuples");
this.nConsumedTopics = operatorContext.getMetrics().getCustomMetric ("nConsumedTopics");
+ this.isGroupManagementActive = operatorContext.getMetrics().getCustomMetric ("isGroupManagementActive");
+ }
+
+ /**
+ * Generates a group identifier that is consistent across PE relaunches, but not across job submissions.
+ * @param context the operator context
+ * @return a group identifier
+ */
+ private String generateGroupId (final OperatorContext context) {
+ final ProcessingElement pe = context.getPE();
+ final int iidH = pe.getInstanceId().hashCode();
+ final int opnH = context.getName().hashCode();
+ final String id = MsgFormatter.format ("i{0}-j{1}-o{2}",
+ (iidH < 0? "N" + (-iidH): "P" + iidH),
+ "" + pe.getJobId(),
+ (opnH < 0? "N" + (-opnH): "P" + opnH));
+ return id;
}
/**
@@ -311,6 +335,7 @@ public void run() {
}
catch (Exception e) {
initializationException = e;
+ return;
}
finally {
consumerInitLatch.countDown(); // notify that consumer is ready
@@ -368,6 +393,15 @@ public boolean isSubscribedOrAssigned() {
}
+ /**
+ * The default implementation returns false.
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#supports(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
+ */
+ @Override
+ public boolean supports (ControlPortAction action) {
+ return false;
+ }
+
/**
* Runs a loop and consumes the event queue until the processing flag is set to false.
* @throws InterruptedException the thread has been interrupted
@@ -394,11 +428,13 @@ private void runEventLoop() throws InterruptedException {
case STOP_POLLING:
event.countDownLatch(); // indicates that polling has stopped
break;
- case UPDATE_ASSIGNMENT:
+ case CONTROLPORT_EVENT:
+ final ControlPortAction data = (ControlPortAction) event.getData();
try {
- processUpdateAssignmentEvent ((TopicPartitionUpdate) event.getData());
+ processControlPortActionEvent (data);
} catch (Exception e) {
- logger.error("The assignment '" + (TopicPartitionUpdate) event.getData() + "' update failed: " + e.getLocalizedMessage());
+ nFailedControlTuples.increment();
+ logger.error("The control processing '" + data + "' failed: " + e.getLocalizedMessage());
} finally {
event.countDownLatch();
}
@@ -470,12 +506,12 @@ protected void commitOffsets (CommitInfo offsets) throws RuntimeException {
Map<TopicPartition, OffsetAndMetadata> map = new HashMap<>(1);
for (TopicPartition tp: offsetMap.keySet()) {
// do not commit for partitions we are not assigned
- if (!currentAssignment.contains(tp)) continue;
+ if (!currentAssignment.contains (tp)) continue;
map.clear();
- map.put(tp, offsetMap.get(tp));
+ map.put (tp, offsetMap.get (tp));
if (offsets.isCommitSynchronous()) {
try {
- consumer.commitSync(map);
+ consumer.commitSync (map);
postOffsetCommit (map);
}
catch (CommitFailedException e) {
@@ -589,7 +625,7 @@ protected void shutdown() {
* @param update the update increment/decrement
* @throws Exception
*/
- protected abstract void processUpdateAssignmentEvent (TopicPartitionUpdate update);
+ protected abstract void processControlPortActionEvent (ControlPortAction update);
/**
* This method must be overwritten by concrete classes.
@@ -621,28 +657,29 @@ protected void shutdown() {
*/
@Override
public ConsumerRecord<?, ?> getNextRecord (long timeout, TimeUnit timeUnit) throws InterruptedException {
- ConsumerRecord<?, ?> record = null;
+ preDeQueueForSubmit();
if (messageQueue.isEmpty()) {
// assuming, that the queue is not filled concurrently...
+ msgQueueLock.lock();
msgQueueProcessed.set (true);
- try {
- msgQueueLock.lock();
- msgQueueEmptyCondition.signalAll();
- } finally {
- msgQueueLock.unlock();
- }
+ msgQueueEmptyCondition.signalAll();
+ msgQueueLock.unlock();
+ }
+ else {
+ msgQueueProcessed.set (false);
}
- else msgQueueProcessed.set (false);
// if filling the queue is NOT stopped, we can, of cause,
// fetch a record now from the queue, even when we have seen an empty queue, shortly before...
-
- preDeQueueForSubmit();
// messageQueue.poll throws InterruptedException
- record = messageQueue.poll (timeout, timeUnit);
+ ConsumerRecord<?, ?> record = messageQueue.poll (timeout, timeUnit);
if (record == null) {
- // no messages - queue is empty
+ // no messages - queue is empty, i.e. it was empty at the time we polled
if (logger.isTraceEnabled()) logger.trace("getNextRecord(): message queue is empty");
nPendingMessages.setValue (messageQueue.size());
+ msgQueueProcessed.set (true);
+ }
+ else {
+ msgQueueProcessed.set (false);
}
return record;
}
@@ -692,14 +729,17 @@ protected int clearDrainBuffer() {
* @throws InterruptedException The waiting thread has been interrupted waiting
*/
protected void awaitMessageQueueProcessed() throws InterruptedException {
- while (!(messageQueue.isEmpty() && msgQueueProcessed.get())) {
- try {
- msgQueueLock.lock();
+ final long start = System.nanoTime();
+ msgQueueLock.lock();
+ try {
+ while (!(messageQueue.isEmpty() && msgQueueProcessed.get())) {
msgQueueEmptyCondition.await (100l, TimeUnit.MILLISECONDS);
}
- finally {
- msgQueueLock.unlock();
- }
+ }
+ finally {
+ msgQueueLock.unlock();
+ final long stop = System.nanoTime();
+ logger.log (DEBUG_LEVEL, "waiting for message queue being processed took " + (stop-start)/1_000_000L + " ms.");
}
}
@@ -778,7 +818,7 @@ protected void runPollLoop (long pollTimeout, long throttleSleepMillis) throws I
final int nMessages = r.getNumRecords();
final long nQueuedBytes = r.getSumTotalSize();
final Level l = Level.DEBUG;
-// final Level l = DEBUG_LEVEL;
+ // final Level l = DEBUG_LEVEL;
if (logger.isEnabledFor (l) && nMessages > 0) {
logger.log (l, MsgFormatter.format ("{0,number,#} records with total {1,number,#}/{2,number,#}/{3,number,#} bytes (key/value/sum) fetched and enqueued",
nMessages, r.getSumKeySize(), r.getSumValueSize(), nQueuedBytes));
@@ -802,6 +842,7 @@ protected void runPollLoop (long pollTimeout, long throttleSleepMillis) throws I
// catches also 'java.io.IOException: Broken pipe' when SSL is used
logger.warn ("Exception caugt: " + e, e);
if (++nConsecutiveRuntimeExc >= 50) {
+ logger.error (e);
throw new KafkaOperatorRuntimeException ("Consecutive number of exceptions too high (50).", e);
}
logger.info ("Going to sleep for 100 ms before next poll ...");
@@ -829,7 +870,7 @@ private void tryAdjustMinFreeMemory (long numBytes, int nMessages) {
final long newMinAlloc = minAllocatableMemorySaveSetting (numBytes);
if (newMinAlloc <= this.minAllocatableMemoryAdjusted)
return;
-
+
logger.warn (MsgFormatter.format ("adjusting the minimum allocatable memory from {0} to {1} to fetch new Kafka messages", this.minAllocatableMemoryAdjusted, newMinAlloc));
//Example: max = 536,870,912, total = 413,073,408, free = 7,680,336
// now let's see if this would be possible
@@ -1080,10 +1121,14 @@ protected void assign (Set<TopicPartition> topicPartitions) {
protected void subscribe (Collection topics, ConsumerRebalanceListener rebalanceListener) {
logger.info("Subscribing. topics = " + topics); //$NON-NLS-1$
if (topics == null) topics = Collections.emptyList();
+ if (topics.isEmpty()) {
+ setConsumedTopics (null);
+ this.assignedPartitions = new HashSet<TopicPartition> ();
+ nAssignedPartitions.setValue (0L);
+ } else {
+ tryCreateCustomMetric (N_PARTITION_REBALANCES, "Number of partition rebalances within the consumer group", Metric.Kind.COUNTER);
+ }
consumer.subscribe (topics, rebalanceListener);
- try {
- getOperatorContext().getMetrics().createCustomMetric (N_PARTITION_REBALANCES, "Number of partition rebalances within the consumer group", Metric.Kind.COUNTER);
- } catch (IllegalArgumentException metricExits) { /* really nothing to be done */ }
this.subscriptionMode = topics.isEmpty()? SubscriptionMode.NONE: SubscriptionMode.SUBSCRIBED;
}
@@ -1099,10 +1144,8 @@ protected void subscribe (Pattern pattern, ConsumerRebalanceListener rebalanceLi
consumer.unsubscribe();
}
else {
+ tryCreateCustomMetric (N_PARTITION_REBALANCES, "Number of partition rebalances within the consumer group", Metric.Kind.COUNTER);
consumer.subscribe (pattern, rebalanceListener);
- try {
- getOperatorContext().getMetrics().createCustomMetric (N_PARTITION_REBALANCES, "Number of partition rebalances within the consumer group", Metric.Kind.COUNTER);
- } catch (IllegalArgumentException metricExits) { /* really nothing to be done */ }
}
this.subscriptionMode = SubscriptionMode.SUBSCRIBED;
}
@@ -1115,8 +1158,8 @@ protected void subscribe (Pattern pattern, ConsumerRebalanceListener rebalanceLi
* @throws InterruptedException The thread waiting for finished condition has been interrupted.
*/
@Override
- public void onTopicAssignmentUpdate (final TopicPartitionUpdate update) throws InterruptedException {
- Event event = new Event(EventType.UPDATE_ASSIGNMENT, update, true);
+ public void onControlPortAction (final ControlPortAction update) throws InterruptedException {
+ Event event = new Event(EventType.CONTROLPORT_EVENT, update, true);
sendEvent (event);
event.await();
}
@@ -1131,6 +1174,7 @@ public void onTopicAssignmentUpdate (final TopicPartitionUpdate update) throws I
*/
@Override
public void onShutdown (long timeout, TimeUnit timeUnit) throws InterruptedException {
+ if (!isProcessing()) return;
Event event = new Event(EventType.SHUTDOWN, true);
sendEvent (event);
event.await (timeout, timeUnit);
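
With the format string i{0}-j{1}-o{2} and the N/P prefixes for negative/positive hash codes above, a generated group-ID looks like, for instance, iP1656380808-j7-oN210699385 (hash values illustrative). Instance ID, operator name, and job ID survive a PE relaunch, so the generated group.id stays stable across relaunches; a re-submitted job gets a new job ID and therefore a new group.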
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/AbstractNonCrKafkaConsumerClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/AbstractNonCrKafkaConsumerClient.java
index 1ed348db..859e33af 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/AbstractNonCrKafkaConsumerClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/AbstractNonCrKafkaConsumerClient.java
@@ -348,7 +348,9 @@ protected void preDeQueueForSubmit() {
// assigned to the partition any more when building a consumer group.
// Then a different (or the same) consumer starts reading the records again creating duplicates within the application.
// This is normal Kafka methodology.
- sendStartPollingEvent();
+ if (isSubscribedOrAssigned()) {
+ sendStartPollingEvent();
+ }
}
} catch (InterruptedException e) {
// is not thrown when asynchronously committed; can be silently ignored.
@@ -370,7 +372,9 @@ public void postSubmit (ConsumerRecord<?, ?> submittedRecord) {
// collect submitted offsets per topic partition for periodic commit.
try {
synchronized (offsetManager) {
- offsetManager.savePosition(submittedRecord.topic(), submittedRecord.partition(), submittedRecord.offset() +1l, /*autoCreateTopci=*/true);
+ final String topic = submittedRecord.topic();
+ final int partition = submittedRecord.partition();
+ offsetManager.savePosition (topic, partition, submittedRecord.offset() +1l, /*autoCreateTopic=*/true);
}
} catch (Exception e) {
// is not caught when autoCreateTopic is 'true'
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerClient.java
index caeda04c..35f7d51b 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerClient.java
@@ -31,6 +31,12 @@ public interface ConsumerClient {
public static final String DRAIN_TIME_MILLIS_MAX_METRIC_NAME = "drainTimeMillisMax";
public static final String DRAIN_TIME_MILLIS_METRIC_NAME = "drainTimeMillis";
+ /**
+ * Returns the implementation magic number.
+ * @return a hash number of the implementation of the runtime class.
+ */
+ public int getImplementationMagic();
+
/**
* Returns the client-ID, which is the value of the Kafka consumer property client.id
* @return the client-ID
@@ -113,6 +119,14 @@ void subscribeToTopicsWithOffsets(final String topic, final List partit
*/
boolean isSubscribedOrAssigned();
+ /**
+ * Tests if a client supports an action triggered via control port.
+ * If an action is not supported, {@link #onControlPortAction(ControlPortAction)} should not be invoked.
+ * @param action The action
+ * @return true, if the client implementation supports the action, false otherwise
+ */
+ boolean supports (ControlPortAction action);
+
/**
* Initiates start of polling for KafKa messages.
* Implementations should ignore this event if the consumer is not subscribed or assigned to partitions.
@@ -127,13 +141,13 @@ void subscribeToTopicsWithOffsets(final String topic, final List partit
void sendStopPollingEvent() throws InterruptedException;
/**
- * Initiates topic partition assignment update. When this method is called, the consumer must be assigned to topic partitions.
- * If the consumer is subscribed to topics, the request is ignored.
- * Implementations ensure assignments have been updated when this method returns.
+ * Initiates the action on control port event.
+ * If the consumer does not support the action, the behavior is up to the consumer implementation.
 * @param update The partition update.
 * @throws InterruptedException The thread waiting for the finished condition has been interrupted.
+ * @see #supports(ControlPortAction)
*/
- void onTopicAssignmentUpdate (final TopicPartitionUpdate update) throws InterruptedException;
+ void onControlPortAction (final ControlPortAction update) throws InterruptedException;
/**
* Action to be performed on consistent region drain.
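
A sketch of the intended call sequence for control port tuples (the dispatcher class and method names are illustrative; in the toolkit the operator code is the real caller):

    import com.ibm.streamsx.kafka.ControlportJsonParseException;
    import com.ibm.streamsx.kafka.clients.consumer.ConsumerClient;
    import com.ibm.streamsx.kafka.clients.consumer.ControlPortAction;

    public class ControlTupleDispatch {
        /** Parses a control tuple and forwards it to the active consumer client. */
        static void dispatch (ConsumerClient client, String json) throws InterruptedException {
            try {
                final ControlPortAction action = ControlPortAction.fromJSON (json);
                if (client.supports (action)) {
                    client.onControlPortAction (action);    // returns after the action is processed
                }
                // else: action not supported; a caller may count the tuple as failed
            }
            catch (ControlportJsonParseException e) {
                // malformed JSON - nothing to dispatch
            }
        }
    }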
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerClientBuilder.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerClientBuilder.java
new file mode 100644
index 00000000..cfd90285
--- /dev/null
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerClientBuilder.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.ibm.streamsx.kafka.clients.consumer;
+
+/**
+ * Builds a consumer client implementation.
+ * @author The IBM Kafka toolkit maintainers
+ * @since 3.0
+ */
+public interface ConsumerClientBuilder {
+ /**
+ * Builds a Consumer client.
+ * @return the consumer client
+ */
+ ConsumerClient build() throws Exception;
+
+ /**
+ * Returns the implementation magic number of the built clients.
+ * @return a hash number of the implementation of the runtime class.
+ */
+ public int getImplementationMagic();
+}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerTimeouts.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerTimeouts.java
index a274cc2b..c32beab9 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerTimeouts.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ConsumerTimeouts.java
@@ -36,21 +36,9 @@ public class ConsumerTimeouts {
// consumers while a consumer is restarted. When a consumer restarts (subscribes)
// the partitions are re-assigned anyway.
// When a consumer closes the client (graceful shutdown on stopPE) the group coordinator initializes re-balance immediately.
- private static final long SESSION_TIMEOUT_MS = 20000;
+ private static final long SESSION_TIMEOUT_MS_DYNAMIC_GRP = 20000;
+ private static final long SESSION_TIMEOUT_MS_STATIC_GRP = 120000;
private static final long METADATA_MAX_AGE_MS = 2000;
-// auto.commit.interval.ms = 5000 -
-// connections.max.idle.ms = 540000
-// fetch.max.wait.ms = 500
-// heartbeat.interval.ms = 3000
-// max.poll.interval.ms = 540000 x
-// metadata.max.age.ms = 300000
-// metrics.sample.window.ms = 30000
-// reconnect.backoff.max.ms = 1000
-// reconnect.backoff.ms = 50
-// request.timeout.ms = 125000 x
-// retry.backoff.ms = 100
-// session.timeout.ms = 120000 x
-
@SuppressWarnings("unused")
private final OperatorContext opContext;
@@ -75,7 +63,9 @@ public ConsumerTimeouts (OperatorContext operatorContext, KafkaOperatorPropertie
crResetTimeoutMs = 0;
crDrainTimeoutMs = 0;
}
- this.kafkaProperties = kafkaProperties;
+ // clone the Kafka properties to avoid them being changed when the properties are set up.
+ this.kafkaProperties = new KafkaOperatorProperties();
+ this.kafkaProperties.putAll (kafkaProperties);
}
/**
@@ -90,11 +80,19 @@ public long getMaxPollIntervalMs () {
}
/**
- * Returns the minimum timeout for session.timeout.ms in milliseconds, which should be higher than the PE restart + reset time of an operator.
+ * Returns the minimum timeout for session.timeout.ms in milliseconds, which should be higher than the PE restart + reset time of an operator.
+ * With group.instance.id set (static group membership), the value is at least 120000 ms (two minutes) and at least 1.2 times the consistent region reset timeout.
* @return the recommended value for session.timeout.ms
*/
public long getSessionTimeoutMs () {
- return SESSION_TIMEOUT_MS;
+ final boolean isDynamicGroupMember = kafkaProperties.getProperty (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "").trim().isEmpty();
+ if (isDynamicGroupMember) return SESSION_TIMEOUT_MS_DYNAMIC_GRP;
+
+ // default broker configs for maximum values:
+ // group.max.session.timeout.ms = 300000 (5 min) for Kafka <= 2.2
+ // group.max.session.timeout.ms = 1800000 (30 min) since Kafka 2.3
+ final long crResetTo12 = (long) (1.2 * crResetTimeoutMs);
+ return crResetTo12 > SESSION_TIMEOUT_MS_STATIC_GRP? crResetTo12: SESSION_TIMEOUT_MS_STATIC_GRP;
}
/**
@@ -102,8 +100,11 @@ public long getSessionTimeoutMs () {
* @return the recommended value for request.timeout.ms
*/
public long getRequestTimeoutMs () {
- long sessionTimeoutMs = SESSION_TIMEOUT_MS;
- if (kafkaProperties.containsKey (ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG)) {
+ long sessionTimeoutMs = SESSION_TIMEOUT_MS_DYNAMIC_GRP;
+ // when group.instance.id is set, the session timeout may have been set to a really high value.
+ // In this case, do NOT use session.timeout.ms + 5s
+ final boolean isDynamicGroupMember = kafkaProperties.getProperty (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "").trim().isEmpty();
+ if (kafkaProperties.containsKey (ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG) && isDynamicGroupMember) {
sessionTimeoutMs = Long.valueOf (kafkaProperties.getProperty (ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG));
}
return sessionTimeoutMs + 5000;
@@ -127,11 +128,24 @@ public long getJmxResetNotificationTimeout() {
*/
public void adjust (KafkaOperatorProperties kafkaProperties) {
adjustPropertyToMin (kafkaProperties, ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, getMaxPollIntervalMs());
- adjustPropertyToMax (kafkaProperties, ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, getSessionTimeoutMs());
+ setPropertyIfUnset (kafkaProperties, ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "" + getSessionTimeoutMs());
adjustPropertyToMin (kafkaProperties, ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, getRequestTimeoutMs());
adjustPropertyToMax (kafkaProperties, ConsumerConfig.METADATA_MAX_AGE_CONFIG, METADATA_MAX_AGE_MS);
}
+ /**
+ * Sets a property value if the key does not yet exist in the given kafkaProperties
+ * @param kafkaProperties The kafka properties that get mutated
+ * @param propertyName the property name
+ * @param value the property value to set
+ */
+ private void setPropertyIfUnset (KafkaOperatorProperties kafkaProperties, final String propertyName, final String value) {
+ if (!kafkaProperties.containsKey (propertyName)) {
+ kafkaProperties.put (propertyName, value);
+ trace.info (MsgFormatter.format ("consumer config ''{0}'' has been set to {1}.", propertyName, value));
+ }
+ }
+
/**
* Mutates a single numeric property to a minimum value.
* @param kafkaProperties The kafka properties that get mutated
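
A worked example of the new timeout logic: for a static group member (group.instance.id set) in a consistent region with a reset timeout of 180000 ms, getSessionTimeoutMs() returns max(1.2 * 180000, 120000) = 216000 ms, and adjust() now only seeds session.timeout.ms with that value when the user has not set the property. getRequestTimeoutMs() intentionally ignores such a large session timeout for static members and returns 20000 + 5000 = 25000 ms.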
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ControlPortAction.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ControlPortAction.java
new file mode 100644
index 00000000..a27afb3f
--- /dev/null
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ControlPortAction.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.ibm.streamsx.kafka.clients.consumer;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.kafka.common.TopicPartition;
+import org.apache.log4j.Logger;
+
+import com.google.gson.Gson;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.ibm.streamsx.kafka.ControlportJsonParseException;
+import com.ibm.streamsx.kafka.i18n.Messages;
+
+public class ControlPortAction {
+
+ private final static Logger trace = Logger.getLogger (ControlPortAction.class);
+
+ private final static Gson gson = new Gson();
+ // actions allowed in JSON:
+ private static enum JsonAction {ADD, REMOVE};
+ private final ControlPortActionType action;
+ private final Map<TopicPartition, Long> topicPartitionOffsetMap;
+ private final Set<String> topics;
+ private final String json;
+
+ private ControlPortAction(String json, ControlPortActionType action, Map<TopicPartition, Long> topicPartitionOffsetMap) {
+ if (!(action == ControlPortActionType.ADD_ASSIGNMENT || action == ControlPortActionType.REMOVE_ASSIGNMENT)) {
+ throw new IllegalArgumentException ("invalid action: " + action);
+ }
+ this.action = action;
+ this.topicPartitionOffsetMap = topicPartitionOffsetMap;
+ this.topics = null;
+ this.json = json;
+ }
+
+ private ControlPortAction(String json, ControlPortActionType action, Set<String> topics) {
+ if (!(action == ControlPortActionType.ADD_SUBSCRIPTION || action == ControlPortActionType.REMOVE_SUBSCRIPTION)) {
+ throw new IllegalArgumentException ("invalid action: " + action);
+ }
+ this.action = action;
+ this.topicPartitionOffsetMap = null;
+ this.topics = topics;
+ this.json = json;
+ }
+
+ private ControlPortAction (String json) {
+ this.action = ControlPortActionType.NONE;
+ this.topicPartitionOffsetMap = null;
+ this.topics = null;
+ this.json = json;
+ }
+
+ public ControlPortActionType getActionType() {
+ return action;
+ }
+
+ public Map<TopicPartition, Long> getTopicPartitionOffsetMap() {
+ return topicPartitionOffsetMap;
+ }
+
+ /**
+ * @return the topics
+ */
+ public Set<String> getTopics() {
+ return topics;
+ }
+
+ public String getJson() {
+ return json;
+ }
+
+ @Override
+ public String toString() {
+ switch (action) {
+ case ADD_ASSIGNMENT:
+ case REMOVE_ASSIGNMENT:
+ return " [action=" + action + ", topicPartitionOffsetMap=" + topicPartitionOffsetMap + "]";
+ case ADD_SUBSCRIPTION:
+ case REMOVE_SUBSCRIPTION:
+ return " [action=" + action + ", topics=" + topics + "]";
+ default:
+ return " [action=" + action + ", topicPartitionOffsetMap=" + topicPartitionOffsetMap + ", topic=" + topics + "]";
+ }
+ }
+
+ /**
+ * Creates a ControlPortAction from a JSON formatted String
+ * @param json The JSON string
+ * @return a ControlPortAction object
+ * @throws ControlportJsonParseException parsing JSON failed
+ */
+ public static ControlPortAction fromJSON (String json) throws ControlportJsonParseException {
+ JsonObject jsonObj = null;
+ try {
+ jsonObj = gson.fromJson (json, JsonObject.class);
+ }
+ catch (Exception e) {
+ ControlportJsonParseException exc = new ControlportJsonParseException (e.getMessage(), e);
+ exc.setJson (json);
+ throw exc;
+ }
+
+ if (jsonObj == null) {
+ ControlportJsonParseException exc = new ControlportJsonParseException (Messages.getString("INVALID_JSON_MISSING_KEY", "action", json==null? "null": json));
+ exc.setJson(json);
+ throw exc;
+ }
+ final String jason = jsonObj.toString();
+ ControlPortActionType a = null;
+ JsonAction action = null;
+ if (jsonObj.has ("action")) { //$NON-NLS-1$
+ try {
+ action = JsonAction.valueOf (jsonObj.get ("action").getAsString().toUpperCase()); //$NON-NLS-1$
+ }
+ catch (Exception e) {
+ ControlportJsonParseException exc = new ControlportJsonParseException (e.getMessage(), e);
+ exc.setJson (json);
+ throw exc;
+ }
+ } else {
+ ControlportJsonParseException exc = new ControlportJsonParseException (Messages.getString("INVALID_JSON_MISSING_KEY", "action", json));
+ exc.setJson (json);
+ throw exc;
+ }
+
+ if (jsonObj.has ("topicPartitionOffsets") && jsonObj.has ("topics")) { //$NON-NLS-1$
+ final ControlportJsonParseException exc = new ControlportJsonParseException (Messages.getString ("INVALID_JSON", json));
+ exc.setJson (json);
+ throw exc;
+ }
+ if (!jsonObj.has ("topicPartitionOffsets") && !jsonObj.has ("topics")) { //$NON-NLS-1$
+ trace.warn ("expected \"topicPartitionOffsets\" or \"topics\" element in JSON: " + jason);
+ }
+
+ Map<TopicPartition, Long> topicPartitionOffsetMap = new HashMap<>();
+ Set<String> topics = new HashSet<>();
+ if (jsonObj.has ("topicPartitionOffsets")) { //$NON-NLS-1$
+ a = action == JsonAction.ADD? ControlPortActionType.ADD_ASSIGNMENT: ControlPortActionType.REMOVE_ASSIGNMENT;
+ JsonArray arr = jsonObj.get ("topicPartitionOffsets").getAsJsonArray(); //$NON-NLS-1$
+ Iterator<JsonElement> it = arr.iterator();
+ while (it.hasNext()) {
+ JsonObject tpo = it.next().getAsJsonObject();
+ if(!tpo.has ("topic")) { //$NON-NLS-1$
+ ControlportJsonParseException exc = new ControlportJsonParseException (Messages.getString("INVALID_JSON_MISSING_KEY", "topic", json));
+ exc.setJson (json);
+ throw exc;
+ }
+
+ if(!tpo.has("partition")) { //$NON-NLS-1$
+ ControlportJsonParseException exc = new ControlportJsonParseException (Messages.getString("INVALID_JSON_MISSING_KEY", "partition", json));
+ exc.setJson (json);
+ throw exc;
+ }
+ try {
+ String topic = tpo.get ("topic").getAsString(); //$NON-NLS-1$
+ int partition = tpo.get ("partition").getAsInt(); //$NON-NLS-1$
+ long offset = tpo.has ("offset")? tpo.get ("offset").getAsLong(): OffsetConstants.NO_SEEK; //$NON-NLS-1$ //$NON-NLS-2$
+ topicPartitionOffsetMap.put (new TopicPartition (topic, partition), Long.valueOf (offset));
+ }
+ catch (Exception e) {
+ // Handle Number format errors
+ ControlportJsonParseException exc = new ControlportJsonParseException (e.getMessage(), e);
+ exc.setJson (json);
+ throw exc;
+ }
+ }
+ return new ControlPortAction (jsonString, a, topicPartitionOffsetMap);
+ }
+ if (jsonObj.has ("topics")) {
+ a = action == JsonAction.ADD? ControlPortActionType.ADD_SUBSCRIPTION: ControlPortActionType.REMOVE_SUBSCRIPTION;
+ JsonArray arr = jsonObj.get ("topics").getAsJsonArray(); //$NON-NLS-1$
+ Iterator<JsonElement> it = arr.iterator();
+ while (it.hasNext()) {
+ JsonObject tpc = it.next().getAsJsonObject();
+ if(!tpc.has ("topic")) { //$NON-NLS-1$
+ ControlportJsonParseException exc = new ControlportJsonParseException (Messages.getString("INVALID_JSON_MISSING_KEY", "topic", json));
+ exc.setJson (json);
+ throw exc;
+ }
+ try {
+ String topic = tpc.get ("topic").getAsString(); //$NON-NLS-1$
+ topics.add (topic);
+ }
+ catch (Exception e) {
+ // Handle errors getting the topic name as String
+ ControlportJsonParseException exc = new ControlportJsonParseException (e.getMessage(), e);
+ exc.setJson (json);
+ throw exc;
+ }
+ }
+ return new ControlPortAction (jsonString, a, topics);
+ }
+ return new ControlPortAction (jsonString);
+ }
+}
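For illustration, a minimal sketch of the control port JSON that fromJSON() accepts; the topic names and offsets below are made-up values. "topicPartitionOffsets" and "topics" are mutually exclusive, and a missing "offset" defaults to OffsetConstants.NO_SEEK:

    // hypothetical control port messages; offset -2 means 'seek to beginning', -1 'seek to end'
    String addAssignment = "{\"action\": \"ADD\", \"topicPartitionOffsets\": "
            + "[{\"topic\": \"t1\", \"partition\": 0, \"offset\": -2}]}";
    String removeSubscription = "{\"action\": \"REMOVE\", \"topics\": [{\"topic\": \"t1\"}]}";

    ControlPortAction a1 = ControlPortAction.fromJSON (addAssignment);
    assert a1.getActionType() == ControlPortActionType.ADD_ASSIGNMENT;
    ControlPortAction a2 = ControlPortAction.fromJSON (removeSubscription);
    assert a2.getActionType() == ControlPortActionType.REMOVE_SUBSCRIPTION;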
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/TopicPartitionUpdateAction.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ControlPortActionType.java
similarity index 79%
rename from com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/TopicPartitionUpdateAction.java
rename to com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ControlPortActionType.java
index e4fbb041..d56f3988 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/TopicPartitionUpdateAction.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/ControlPortActionType.java
@@ -13,9 +13,12 @@
*/
package com.ibm.streamsx.kafka.clients.consumer;
-public enum TopicPartitionUpdateAction {
+public enum ControlPortActionType {
- ADD,
- REMOVE;
+ NONE,
+ ADD_ASSIGNMENT,
+ REMOVE_ASSIGNMENT,
+ ADD_SUBSCRIPTION,
+ REMOVE_SUBSCRIPTION,
+ COMMIT_OFFSETS;
}
-
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/CrKafkaConsumerGroupClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/CrKafkaConsumerGroupClient.java
index de2cd7de..2bbad127 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/CrKafkaConsumerGroupClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/CrKafkaConsumerGroupClient.java
@@ -69,6 +69,7 @@
import com.ibm.streams.operator.state.ConsistentRegionContext;
import com.ibm.streamsx.kafka.KafkaClientInitializationException;
import com.ibm.streamsx.kafka.KafkaConfigurationException;
+import com.ibm.streamsx.kafka.KafkaOperatorNotRegisteredException;
import com.ibm.streamsx.kafka.KafkaOperatorResetFailedException;
import com.ibm.streamsx.kafka.KafkaOperatorRuntimeException;
import com.ibm.streamsx.kafka.MsgFormatter;
@@ -157,6 +158,7 @@ private CrKafkaConsumerGroupClient (OperatorContext operatorContext, Clas
}
trace.info (MsgFormatter.format ("CR timeouts: reset: {0}, drain: {1}", crContext.getResetTimeout(), crContext.getDrainTimeout()));
ClientState newState = ClientState.INITIALIZED;
+ isGroupManagementActive.setValue (1L);
trace.log (DEBUG_LEVEL, MsgFormatter.format ("client state transition: {0} -> {1}", state, newState));
state = newState;
}
@@ -845,10 +847,6 @@ protected void validate() throws Exception {
trace.error (msg);
throw new KafkaConfigurationException (msg);
}
- // test that group-ID is not the generated (random) value
- if (isGroupIdGenerated()) {
- throw new KafkaConfigurationException (getThisClassName() + " cannot be used without specifying the groupId parameter or a group.id consumer property");
- }
if (initialStartPosition == StartPosition.Offset) {
throw new KafkaConfigurationException (getThisClassName() + " does not support startPosition = " + initialStartPosition);
}
@@ -1009,63 +1007,80 @@ private void createSeekOffsetMap (Checkpoint checkpoint) throws InterruptedExcep
trace.warn (MsgFormatter.format ("Operator name in checkpoint ({0}) differs from current operator name: {1}", myOperatorNameInCkpt, operatorName));
}
if (!contributingOperators.contains (operatorName)) {
- trace.error (MsgFormatter.format ("This operator''s name ({0}) not found in contributing operator names: {1}",
- operatorName, contributingOperators));
+ final String msg = MsgFormatter.format ("This operator''s name ({0}) not found in contributing operator names: {1}",
+ operatorName, contributingOperators);
+ trace.error (msg);
+ throw new KafkaOperatorResetFailedException (msg);
}
trace.info (MsgFormatter.format ("contributing {0} partition => offset mappings to the group''s checkpoint.", offsMgr.size()));
- // send checkpoint data to CrGroupCoordinator MXBean and wait for the notification
- // to fetch the group's complete checkpoint. Then, process the group's checkpoint.
- Map partialOffsetMap = new HashMap<>();
- for (TopicPartition tp: offsMgr.getMappedTopicPartitions()) {
- final String topic = tp.topic();
- final int partition = tp.partition();
- final Long offset = offsMgr.getOffset (topic, partition);
- partialOffsetMap.put (new TP (topic, partition), offset);
+ if (contributingOperators.size() == 1) {
+ trace.info ("this single operator participated in consumer group at checkpoint time. Checkpoint merge and distribution via MXBean disabled.");
+ assert (contributingOperators.contains (operatorName));
+ initSeekOffsetMap();
+ for (TopicPartition tp: offsMgr.getMappedTopicPartitions()) {
+ final String topic = tp.topic();
+ final int partition = tp.partition();
+ final Long offset = offsMgr.getOffset (topic, partition);
+ this.seekOffsetMap.put (tp, offset);
+ }
}
+ else {
+ // send checkpoint data to CrGroupCoordinator MXBean and wait for the notification
+ // to fetch the group's complete checkpoint. Then, process the group's checkpoint.
+ Map partialOffsetMap = new HashMap<>();
+ for (TopicPartition tp: offsMgr.getMappedTopicPartitions()) {
+ final String topic = tp.topic();
+ final int partition = tp.partition();
+ final Long offset = offsMgr.getOffset (topic, partition);
+ partialOffsetMap.put (new TP (topic, partition), offset);
+ }
- trace.info (MsgFormatter.format ("Merging my group''s checkpoint contribution: partialOffsetMap = {0}, myOperatorName = {1}",
- partialOffsetMap, operatorName));
- this.crGroupCoordinatorMxBean.mergeConsumerCheckpoint (chkptSeqId, resetAttempt, contributingOperators.size(), partialOffsetMap, operatorName);
+ trace.info (MsgFormatter.format ("Merging my group''s checkpoint contribution: partialOffsetMap = {0}, myOperatorName = {1}",
+ partialOffsetMap, operatorName));
+ this.crGroupCoordinatorMxBean.mergeConsumerCheckpoint (chkptSeqId, resetAttempt, contributingOperators.size(), partialOffsetMap, operatorName);
+
+ // check JMX notification and wait for notification
+ jmxNotificationConditionLock.lock();
+ long waitStartTime= System.currentTimeMillis();
+ // increase timeout exponentially with every reset attempt by 20%
+ // long timeoutMillis = (long)(Math.pow (1.2, resetAttempt) * (double)timeouts.getJmxResetNotificationTimeout());
+ long timeoutMillis = timeouts.getJmxResetNotificationTimeout();
+ boolean waitTimeLeft = true;
+ int nWaits = 0;
+ long timeElapsed = 0;
+ trace.log (DEBUG_LEVEL, MsgFormatter.format ("checking receiption of JMX notification {0} for sequenceId {1}. timeout = {2,number,#} ms.",
+ CrConsumerGroupCoordinatorMXBean.MERGE_COMPLETE_NTF_TYPE, key, timeoutMillis));
+ while (!jmxMergeCompletedNotifMap.containsKey (key) && waitTimeLeft) {
+ long remainingTime = timeoutMillis - timeElapsed;
+ waitTimeLeft = remainingTime > 0;
+ if (waitTimeLeft) {
+ if (nWaits++ %50 == 0) trace.log (DEBUG_LEVEL, MsgFormatter.format ("waiting for JMX notification {0} for sequenceId {1}. Remaining time = {2,number,#} of {3,number,#} ms",
+ CrConsumerGroupCoordinatorMXBean.MERGE_COMPLETE_NTF_TYPE, key, remainingTime, timeoutMillis));
+ jmxNotificationCondition.await (100, TimeUnit.MILLISECONDS);
+ }
+ timeElapsed = System.currentTimeMillis() - waitStartTime;
+ }
- // check JMX notification and wait for notification
- jmxNotificationConditionLock.lock();
- long waitStartTime= System.currentTimeMillis();
- // increase timeout exponentially with every reset attempt by 20%
- // long timeoutMillis = (long)(Math.pow (1.2, resetAttempt) * (double)timeouts.getJmxResetNotificationTimeout());
- long timeoutMillis = timeouts.getJmxResetNotificationTimeout();
- boolean waitTimeLeft = true;
- int nWaits = 0;
- long timeElapsed = 0;
- trace.log (DEBUG_LEVEL, MsgFormatter.format ("checking receiption of JMX notification {0} for sequenceId {1}. timeout = {2,number,#} ms.",
- CrConsumerGroupCoordinatorMXBean.MERGE_COMPLETE_NTF_TYPE, key, timeoutMillis));
- while (!jmxMergeCompletedNotifMap.containsKey (key) && waitTimeLeft) {
- long remainingTime = timeoutMillis - timeElapsed;
- waitTimeLeft = remainingTime > 0;
- if (waitTimeLeft) {
- if (nWaits++ %50 == 0) trace.log (DEBUG_LEVEL, MsgFormatter.format ("waiting for JMX notification {0} for sequenceId {1}. Remaining time = {2,number,#} of {3,number,#} ms",
- CrConsumerGroupCoordinatorMXBean.MERGE_COMPLETE_NTF_TYPE, key, remainingTime, timeoutMillis));
- jmxNotificationCondition.await (100, TimeUnit.MILLISECONDS);
+ CrConsumerGroupCoordinator.CheckpointMerge merge = jmxMergeCompletedNotifMap.get (key);
+ jmxNotificationConditionLock.unlock();
+ if (merge == null) {
+ final String msg = MsgFormatter.format ("timeout receiving {0} JMX notification for {1} from MXBean {2} in JCP. Current timeout is {3,number,#} milliseconds.",
+ CrConsumerGroupCoordinatorMXBean.MERGE_COMPLETE_NTF_TYPE, key, crGroupCoordinatorMXBeanName, timeoutMillis);
+ trace.error (msg);
+ throw new KafkaOperatorResetFailedException (msg);
+ }
+ else {
+ trace.info (MsgFormatter.format ("waiting for JMX notification for sequenceId {0} took {1} ms", key, timeElapsed));
}
- timeElapsed = System.currentTimeMillis() - waitStartTime;
- }
- CrConsumerGroupCoordinator.CheckpointMerge merge = jmxMergeCompletedNotifMap.get (key);
- if (merge == null) {
- final String msg = MsgFormatter.format ("timeout receiving {0} JMX notification for {1} from MXBean {2} in JCP. Current timeout is {3,number,#} milliseconds.",
- CrConsumerGroupCoordinatorMXBean.MERGE_COMPLETE_NTF_TYPE, key, crGroupCoordinatorMXBeanName, timeoutMillis);
- trace.error (msg);
- throw new KafkaOperatorResetFailedException (msg);
- }
- else {
- trace.info (MsgFormatter.format ("waiting for JMX notification for sequenceId {0} took {1} ms", key, timeElapsed));
- }
- Map<TP, Long> mergedOffsetMap = merge.getConsolidatedOffsetMap();
- trace.info ("reset offsets (group's checkpoint) received from MXBean: " + mergedOffsetMap);
+ Map<TP, Long> mergedOffsetMap = merge.getConsolidatedOffsetMap();
+ trace.info ("reset offsets (group's checkpoint) received from MXBean: " + mergedOffsetMap);
- initSeekOffsetMap();
- mergedOffsetMap.forEach ((tp, offset) -> {
- this.seekOffsetMap.put (new TopicPartition (tp.getTopic(), tp.getPartition()), offset);
- });
+ initSeekOffsetMap();
+ mergedOffsetMap.forEach ((tp, offset) -> {
+ this.seekOffsetMap.put (new TopicPartition (tp.getTopic(), tp.getPartition()), offset);
+ });
+ }
}
catch (InterruptedException e) {
trace.log (DEBUG_LEVEL, "createSeekOffsetMap(): interrupted waiting for the JMX notification");
@@ -1075,9 +1090,6 @@ private void createSeekOffsetMap (Checkpoint checkpoint) throws InterruptedExcep
trace.error ("reset failed: " + e.getLocalizedMessage());
throw new KafkaOperatorResetFailedException (MsgFormatter.format ("resetting operator {0} to checkpoint sequence ID {1} failed: {2}", getOperatorContext().getName(), chkptSeqId, e.getLocalizedMessage()), e);
}
- finally {
- jmxNotificationConditionLock.unlock();
- }
trace.log (DEBUG_LEVEL, "createSeekOffsetMap(): seekOffsetMap = " + this.seekOffsetMap);
}
@@ -1113,7 +1125,9 @@ protected void processCheckpointEvent (Checkpoint checkpoint) {
final String myOperatorName = getOperatorContext().getName();
if (ENABLE_CHECK_REGISTERED_ON_CHECKPOINT) {
if (!registeredConsumers.contains (myOperatorName)) {
- trace.error (MsgFormatter.format ("My operator name not registered in group MXBean: {0}", myOperatorName));
+ final String msg = MsgFormatter.format ("My operator name not registered in group MXBean: {0}", myOperatorName);
+ trace.error (msg);
+ throw new KafkaOperatorNotRegisteredException(msg);
}
}
ObjectOutputStream oStream = checkpoint.getOutputStream();
@@ -1126,7 +1140,7 @@ protected void processCheckpointEvent (Checkpoint checkpoint) {
trace.log (DEBUG_LEVEL, "data written to checkpoint: assignedPartitionsOffsetManager = " + this.assignedPartitionsOffsetManager);
}
} catch (Exception e) {
- throw new RuntimeException (e.getLocalizedMessage(), e);
+ throw new KafkaOperatorRuntimeException(e.getMessage(), e);
}
trace.log (DEBUG_LEVEL, "processCheckpointEvent() - exiting.");
}
@@ -1138,11 +1152,12 @@ protected void processCheckpointEvent (Checkpoint checkpoint) {
* This method should not be called because operator control port and this client implementation are incompatible.
* A context check should exist to detect this mis-configuration.
* We only log the method call.
- * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processUpdateAssignmentEvent(com.ibm.streamsx.kafka.clients.consumer.TopicPartitionUpdate)
+ * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processControlPortActionEvent(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
*/
@Override
- protected void processUpdateAssignmentEvent (TopicPartitionUpdate update) {
- trace.warn("processUpdateAssignmentEvent(): update = " + update + "; update of assignments not supported by this client: " + getThisClassName());
+ protected void processControlPortActionEvent (ControlPortAction update) {
+ trace.warn("processControlPortActionEvent(): update = " + update + "; update of assignments/subscription not supported by this client: " + getThisClassName());
+ nFailedControlTuples.increment();
}
@@ -1265,7 +1280,7 @@ public void onShutdown (long timeout, TimeUnit timeUnit) throws InterruptedExcep
/**
* The builder for the consumer client following the builder pattern.
*/
- public static class Builder {
+ public static class Builder implements ConsumerClientBuilder {
private OperatorContext operatorContext;
private Class> keyClass;
@@ -1283,7 +1298,8 @@ public final Builder setOperatorContext(OperatorContext c) {
}
public final Builder setKafkaProperties(KafkaOperatorProperties p) {
- this.kafkaProperties = p;
+ this.kafkaProperties = new KafkaOperatorProperties();
+ this.kafkaProperties.putAll (p);
return this;
}
@@ -1334,13 +1350,21 @@ public final Builder setInitialStartTimestamp(long initialStartTimestamp) {
* @return A new ConsumerClient instance
* @throws Exception
*/
+ @Override
public ConsumerClient build() throws Exception {
- CrKafkaConsumerGroupClient client = new CrKafkaConsumerGroupClient (operatorContext, keyClass, valueClass, kafkaProperties, singleTopic);
+ KafkaOperatorProperties p = new KafkaOperatorProperties();
+ p.putAll (this.kafkaProperties);
+ CrKafkaConsumerGroupClient client = new CrKafkaConsumerGroupClient (operatorContext, keyClass, valueClass, p, singleTopic);
client.setPollTimeout (this.pollTimeout);
client.setTriggerCount (this.triggerCount);
client.setInitialStartPosition (this.initialStartPosition);
client.setInitialStartTimestamp (this.initialStartTimestamp);
return client;
}
+
+ @Override
+ public int getImplementationMagic() {
+ return CrKafkaConsumerGroupClient.class.getName().hashCode();
+ }
}
}
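The new ConsumerClientBuilder interface exposes an implementation magic derived from the client's class name. A hedged sketch of one plausible use, comparing two builders without building a client (variable names are assumptions, not part of this changeset):

    ConsumerClientBuilder groupBuilder = new CrKafkaConsumerGroupClient.Builder();
    ConsumerClientBuilder staticBuilder = new CrKafkaStaticAssignConsumerClient.Builder();
    // the magic is constant per implementation class, so a caller can cheaply
    // detect whether two builders would produce the same client implementation
    boolean sameImpl = groupBuilder.getImplementationMagic() == staticBuilder.getImplementationMagic();
    // expected: false, because the class names differ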
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/CrKafkaStaticAssignConsumerClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/CrKafkaStaticAssignConsumerClient.java
index 9d1bc6b6..2f34981e 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/CrKafkaStaticAssignConsumerClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/CrKafkaStaticAssignConsumerClient.java
@@ -13,6 +13,7 @@
*/
package com.ibm.streamsx.kafka.clients.consumer;
+import java.io.IOException;
import java.time.Duration;
import java.util.Base64;
import java.util.Collection;
@@ -25,6 +26,7 @@
import java.util.regex.Pattern;
import org.apache.commons.lang3.SerializationUtils;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.PartitionInfo;
@@ -61,6 +63,7 @@ private CrKafkaStaticAssignConsumerClient (OperatorContext operatorContex
super (operatorContext, keyClass, valueClass, kafkaProperties);
offsetManager = new OffsetManager();
+ isGroupManagementActive.setValue (0L);
}
/**
@@ -83,6 +86,21 @@ public void setTriggerCount (long triggerCount) {
this.triggerCount = triggerCount;
}
+ /**
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#supports(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
+ */
+ @Override
+ public boolean supports (ControlPortAction action) {
+ switch (action.getActionType()) {
+ case ADD_ASSIGNMENT:
+ case REMOVE_ASSIGNMENT:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+
/**
* @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#validate()
*/
@@ -258,12 +276,12 @@ public void postSubmit (ConsumerRecord, ?> submittedRecord) {
}
/**
- * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processUpdateAssignmentEvent(com.ibm.streamsx.kafka.clients.consumer.TopicPartitionUpdate)
+ * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processControlPortActionEvent(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
*/
@Override
- protected void processUpdateAssignmentEvent(TopicPartitionUpdate update) {
+ protected void processControlPortActionEvent(ControlPortAction update) {
// trace with info. to see this method call is important, and it happens not frequently.
- logger.info ("processUpdateAssignmentEvent(): update = " + update);
+ logger.info ("processControlPortActionEvent(): update = " + update);
try {
// create a map of current topic partitions and their offsets
Map<TopicPartition, Long> currentTopicPartitionOffsets = new HashMap<TopicPartition, Long>();
@@ -271,8 +289,8 @@ protected void processUpdateAssignmentEvent(TopicPartitionUpdate update) {
Set<TopicPartition> topicPartitions = getConsumer().assignment();
topicPartitions.forEach(tp -> currentTopicPartitionOffsets.put(tp, getConsumer().position(tp)));
- switch (update.getAction()) {
- case ADD:
+ switch (update.getActionType()) {
+ case ADD_ASSIGNMENT:
update.getTopicPartitionOffsetMap().forEach((tp, offset) -> {
// offset can be -2, -1, or a valid offset o >= 0
// -2 means 'seek to beginning', -1 means 'seek to end'
@@ -287,7 +305,7 @@ protected void processUpdateAssignmentEvent(TopicPartitionUpdate update) {
createJcpCvFromOffsetManagerl();
}
break;
- case REMOVE:
+ case REMOVE_ASSIGNMENT:
update.getTopicPartitionOffsetMap().forEach((tp, offset) -> {
currentTopicPartitionOffsets.remove(tp);
});
@@ -305,7 +323,7 @@ protected void processUpdateAssignmentEvent(TopicPartitionUpdate update) {
}
break;
default:
- throw new Exception ("processUpdateAssignmentEvent: unimplemented action: " + update.getAction());
+ throw new Exception ("processControlPortActionEvent: unimplemented action: " + update.getActionType());
}
} catch (Exception e) {
throw new RuntimeException (e.getLocalizedMessage(), e);
@@ -500,7 +518,7 @@ protected void processCheckpointEvent (Checkpoint checkpoint) {
if (logger.isEnabledFor (DEBUG_LEVEL)) {
logger.log (DEBUG_LEVEL, "offsetManager=" + offsetManager); //$NON-NLS-1$
}
- } catch (Exception e) {
+ } catch (IOException e) {
throw new RuntimeException (e.getLocalizedMessage(), e);
}
logger.log (DEBUG_LEVEL, "processCheckpointEvent() - exiting");
@@ -518,7 +536,7 @@ public void onCheckpointRetire (long id) {
/**
* The builder for the consumer client following the builder pattern.
*/
- public static class Builder {
+ public static class Builder implements ConsumerClientBuilder {
private OperatorContext operatorContext;
private Class> keyClass;
@@ -533,7 +551,8 @@ public final Builder setOperatorContext(OperatorContext c) {
}
public final Builder setKafkaProperties(KafkaOperatorProperties p) {
- this.kafkaProperties = p;
+ this.kafkaProperties = new KafkaOperatorProperties();
+ this.kafkaProperties.putAll (p);
return this;
}
@@ -557,11 +576,23 @@ public final Builder setTriggerCount (long c) {
return this;
}
+ @Override
public ConsumerClient build() throws Exception {
- CrKafkaStaticAssignConsumerClient client = new CrKafkaStaticAssignConsumerClient (operatorContext, keyClass, valueClass, kafkaProperties);
+ KafkaOperatorProperties p = new KafkaOperatorProperties();
+ p.putAll (this.kafkaProperties);
+ if (p.containsKey (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG)) {
+ p.remove (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
+ logger.warn (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG + " removed from consumer configuration (group management disabled)");
+ }
+ CrKafkaStaticAssignConsumerClient client = new CrKafkaStaticAssignConsumerClient (operatorContext, keyClass, valueClass, p);
client.setPollTimeout (pollTimeout);
client.setTriggerCount (triggerCount);
return client;
}
+
+ @Override
+ public int getImplementationMagic() {
+ return CrKafkaStaticAssignConsumerClient.class.getName().hashCode();
+ }
}
}
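The supports (ControlPortAction) method lets the enclosing operator check a parsed action against the concrete client before dispatching it. A hedged sketch of such a guard; the helper method below is illustrative, not part of this changeset:

    // hypothetical dispatch helper in the consumer operator
    void handleControlPortTuple (ConsumerClient client, String json) throws Exception {
        ControlPortAction action = ControlPortAction.fromJSON (json);
        if (client.supports (action)) {
            client.onControlPortAction (action);
        } else {
            // reject: this client implementation cannot handle the action type;
            // a metric such as nFailedControlTuples could be incremented here
        }
    }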
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/DummyConsumerClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/DummyConsumerClient.java
new file mode 100644
index 00000000..8781dc31
--- /dev/null
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/DummyConsumerClient.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.ibm.streamsx.kafka.clients.consumer;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
+
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+
+import com.ibm.streams.operator.OperatorContext;
+import com.ibm.streams.operator.state.Checkpoint;
+import com.ibm.streamsx.kafka.KafkaClientInitializationException;
+import com.ibm.streamsx.kafka.clients.AbstractKafkaClient;
+import com.ibm.streamsx.kafka.properties.KafkaOperatorProperties;
+
+/**
+ * This class represents a ConsumerClient implementation that behaves passively.
+ * It does not fetch messages and does not provide tuples.
+ *
+ * @author The IBM Kafka toolkit maintainers
+ * @since toolkit 3.0
+ */
+public class DummyConsumerClient extends AbstractKafkaClient implements ConsumerClient {
+
+ private boolean processing = false;
+
+
+ public DummyConsumerClient (OperatorContext operatorContext, KafkaOperatorProperties kafkaProperties) {
+ super (operatorContext, kafkaProperties, true);
+ operatorContext.getMetrics().getCustomMetric ("isGroupManagementActive").setValue (0L);
+ }
+
+ /**
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#startConsumer()
+ */
+ @Override
+ public void startConsumer() throws InterruptedException, KafkaClientInitializationException {
+ processing = true;
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#subscribeToTopics(java.util.Collection, java.util.Collection, com.ibm.streamsx.kafka.clients.consumer.StartPosition)
+ */
+ @Override
+ public void subscribeToTopics (Collection<String> topics, Collection<Integer> partitions, StartPosition startPosition) throws Exception {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#subscribeToTopicsWithTimestamp(java.util.Collection, java.util.Collection, long)
+ */
+ @Override
+ public void subscribeToTopicsWithTimestamp (Collection<String> topics, Collection<Integer> partitions, long timestamp) throws Exception {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#subscribeToTopicsWithOffsets(java.lang.String, java.util.List, java.util.List)
+ */
+ @Override
+ public void subscribeToTopicsWithOffsets (String topic, List<Integer> partitions, List<Long> startOffsets) throws Exception {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#subscribeToTopicsWithTimestamp(java.util.regex.Pattern, long)
+ */
+ @Override
+ public void subscribeToTopicsWithTimestamp (Pattern pattern, long timestamp) throws Exception {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#subscribeToTopics(java.util.regex.Pattern, com.ibm.streamsx.kafka.clients.consumer.StartPosition)
+ */
+ @Override
+ public void subscribeToTopics (Pattern pattern, StartPosition startPosition) throws Exception {
+ }
+
+ /**
+ * @return false
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#isSubscribedOrAssigned()
+ */
+ @Override
+ public boolean isSubscribedOrAssigned() {
+ return false;
+ }
+
+ /**
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#supports(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
+ */
+ @Override
+ public boolean supports (ControlPortAction action) {
+ return false;
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#sendStartPollingEvent()
+ */
+ @Override
+ public void sendStartPollingEvent() {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#sendStopPollingEvent()
+ */
+ @Override
+ public void sendStopPollingEvent() throws InterruptedException {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#onControlPortAction(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
+ */
+ @Override
+ public void onControlPortAction (ControlPortAction update) throws InterruptedException {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#onDrain()
+ */
+ @Override
+ public void onDrain() throws Exception {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#onCheckpointRetire(long)
+ */
+ @Override
+ public void onCheckpointRetire (long id) {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#onCheckpoint(com.ibm.streams.operator.state.Checkpoint)
+ */
+ @Override
+ public void onCheckpoint (Checkpoint checkpoint) throws InterruptedException {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#onReset(com.ibm.streams.operator.state.Checkpoint)
+ */
+ @Override
+ public void onReset (Checkpoint checkpoint) throws InterruptedException {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#onResetToInitialState()
+ */
+ @Override
+ public void onResetToInitialState() throws InterruptedException {
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#onShutdown(long, java.util.concurrent.TimeUnit)
+ */
+ @Override
+ public void onShutdown (long timeout, TimeUnit timeUnit) throws InterruptedException {
+ processing = false;
+ }
+
+ /**
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#isProcessing()
+ */
+ @Override
+ public boolean isProcessing() {
+ return processing;
+ }
+
+ /**
+ * @return null
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#getNextRecord(long, java.util.concurrent.TimeUnit)
+ */
+ @Override
+ public ConsumerRecord, ?> getNextRecord (long timeout, TimeUnit timeUnit) throws InterruptedException {
+ Thread.sleep (timeUnit.toMillis (timeout));
+ return null;
+ }
+
+ /**
+ * Empty implementation
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#postSubmit(org.apache.kafka.clients.consumer.ConsumerRecord)
+ */
+ @Override
+ public void postSubmit (ConsumerRecord, ?> submittedRecord) {
+ }
+
+
+ /**
+ * The builder for the consumer client following the builder pattern.
+ */
+ public static class Builder implements ConsumerClientBuilder {
+
+ private KafkaOperatorProperties kafkaProperties;
+ private OperatorContext operatorContext;
+
+ public final Builder setOperatorContext(OperatorContext c) {
+ this.operatorContext = c;
+ return this;
+ }
+
+ public final Builder setKafkaProperties(KafkaOperatorProperties p) {
+ this.kafkaProperties = new KafkaOperatorProperties();
+ this.kafkaProperties.putAll (p);
+ return this;
+ }
+
+ @Override
+ public ConsumerClient build() throws Exception {
+ KafkaOperatorProperties p = new KafkaOperatorProperties();
+ p.putAll (this.kafkaProperties);
+ return new DummyConsumerClient (operatorContext, p);
+ }
+
+ @Override
+ public int getImplementationMagic() {
+ return DummyConsumerClient.class.getName().hashCode();
+ }
+ }
+}
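A minimal usage sketch of the passive client, assuming an OperatorContext 'context' and KafkaOperatorProperties 'props' are available and the code runs where Exception may be thrown:

    ConsumerClient dummy = new DummyConsumerClient.Builder()
            .setOperatorContext (context)
            .setKafkaProperties (props)
            .build();
    dummy.startConsumer();                      // only flips the internal processing flag
    assert dummy.isProcessing();
    assert !dummy.isSubscribedOrAssigned();     // never subscribes or assigns
    // getNextRecord (t, unit) sleeps for the timeout and returns null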
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/Event.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/Event.java
index 1b633fd8..48cc7b93 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/Event.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/Event.java
@@ -27,7 +27,7 @@
public class Event {
public static enum EventType {
- START_POLLING, STOP_POLLING, CHECKPOINT, RESET, RESET_TO_INIT, SHUTDOWN, UPDATE_ASSIGNMENT, COMMIT_OFFSETS;
+ START_POLLING, STOP_POLLING, CHECKPOINT, RESET, RESET_TO_INIT, SHUTDOWN, CONTROLPORT_EVENT, COMMIT_OFFSETS;
};
private EventType eventType;
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/NonCrKafkaConsumerClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/NonCrKafkaConsumerClient.java
index 9ca9b3e6..7046f864 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/NonCrKafkaConsumerClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/NonCrKafkaConsumerClient.java
@@ -14,7 +14,7 @@
package com.ibm.streamsx.kafka.clients.consumer;
import java.io.IOException;
-import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
@@ -23,13 +23,13 @@
import java.util.Set;
import java.util.regex.Pattern;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.TopicPartition;
import org.apache.log4j.Logger;
import com.ibm.streams.operator.OperatorContext;
import com.ibm.streams.operator.state.Checkpoint;
import com.ibm.streams.operator.state.CheckpointContext.Kind;
-import com.ibm.streamsx.kafka.Features;
import com.ibm.streamsx.kafka.KafkaOperatorException;
import com.ibm.streamsx.kafka.KafkaOperatorResetFailedException;
import com.ibm.streamsx.kafka.KafkaOperatorRuntimeException;
@@ -58,6 +58,7 @@ public class NonCrKafkaConsumerClient extends AbstractNonCrKafkaConsumerClient {
private NonCrKafkaConsumerClient (OperatorContext operatorContext, Class<K> keyClass, Class<V> valueClass,
KafkaOperatorProperties kafkaProperties) throws KafkaOperatorException {
super (operatorContext, keyClass, valueClass, kafkaProperties);
+ isGroupManagementActive.setValue (0L);
}
@@ -110,18 +111,13 @@ public void subscribeToTopics (Collection topics, Collection pa
}
assign (partsToAssign);
if (getInitialStartPosition() != StartPosition.Default) {
- if (Features.ENABLE_NOCR_NO_CONSUMER_SEEK_AFTER_RESTART) {
- testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, startPosition);
- // JCP connected, seek when partition not yet committed
- for (TopicPartition tp: partsToAssign) {
- if (!isCommittedForPartition (tp)) {
- seekToPosition (tp, startPosition);
- }
+ testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, startPosition);
+ // JCP connected, seek when partition not yet committed
+ for (TopicPartition tp: partsToAssign) {
+ if (!isCommittedForPartition (tp)) {
+ seekToPosition (tp, startPosition);
}
}
- else {
- seekToPosition (partsToAssign, startPosition);
- }
}
resetCommitPeriod (System.currentTimeMillis());
}
@@ -156,20 +152,14 @@ public void subscribeToTopicsWithTimestamp (Collection topics, Collectio
final Set<TopicPartition> topicPartitions = topicPartitionTimestampMap.keySet();
assign (topicPartitions);
-
- if (Features.ENABLE_NOCR_NO_CONSUMER_SEEK_AFTER_RESTART) {
- testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, getInitialStartPosition());
- // JCP connected, seek when partition not yet committed
- // do not evaluate PE.getRelaunchCount() to decide seek. It is 0 when the width of a parallel region has changed.
- for (TopicPartition tp: topicPartitions) {
- if (!isCommittedForPartition (tp)) {
- seekToTimestamp (tp, timestamp);
- }
+ testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, getInitialStartPosition());
+ // JCP connected, seek when partition not yet committed
+ // do not evaluate PE.getRelaunchCount() to decide seek. It is 0 when the width of a parallel region has changed.
+ for (TopicPartition tp: topicPartitions) {
+ if (!isCommittedForPartition (tp)) {
+ seekToTimestamp (tp, timestamp);
}
}
- else {
- seekToTimestamp (topicPartitionTimestampMap);
- }
resetCommitPeriod (System.currentTimeMillis());
}
@@ -196,20 +186,15 @@ public void subscribeToTopicsWithOffsets (String topic, List partitions
for (int partitionNo: partitions) {
topicPartitionOffsetMap.put (new TopicPartition (topic, partitionNo), startOffsets.get(i++));
}
- if (Features.ENABLE_NOCR_NO_CONSUMER_SEEK_AFTER_RESTART) {
- testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, getInitialStartPosition());
- // JCP connected, seek when partition not yet committed
- // do not evaluate PE.getRelaunchCount() to decide seek. It is 0 when the width of a parallel region has changed.
- assign (topicPartitionOffsetMap.keySet());
- for (TopicPartition tp: topicPartitionOffsetMap.keySet()) {
- if (!isCommittedForPartition (tp)) {
- getConsumer().seek (tp, topicPartitionOffsetMap.get (tp).longValue());
- }
+ testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, getInitialStartPosition());
+ // JCP connected, seek when partition not yet committed
+ // do not evaluate PE.getRelaunchCount() to decide seek. It is 0 when the width of a parallel region has changed.
+ assign (topicPartitionOffsetMap.keySet());
+ for (TopicPartition tp: topicPartitionOffsetMap.keySet()) {
+ if (!isCommittedForPartition (tp)) {
+ getConsumer().seek (tp, topicPartitionOffsetMap.get (tp).longValue());
}
}
- else {
- assignToPartitionsWithOffsets (topicPartitionOffsetMap);
- }
resetCommitPeriod (System.currentTimeMillis());
}
@@ -222,15 +207,10 @@ public void subscribeToTopicsWithOffsets (String topic, List partitions
@Override
@SuppressWarnings("unchecked")
protected void processResetEvent (Checkpoint checkpoint) {
- if (getOperatorContext().getNumberOfStreamingInputs() == 0) {
- trace.debug ("processResetEvent() - ignored");
- return;
- }
final long chkptSeqId = checkpoint.getSequenceId();
trace.log (DEBUG_LEVEL, "processResetEvent() - entering. seq = " + chkptSeqId);
try {
- final ObjectInputStream inputStream = checkpoint.getInputStream();
- final Set<TopicPartition> partitions = (Set<TopicPartition>) inputStream.readObject();
+ final Set<TopicPartition> partitions = (Set<TopicPartition>) checkpoint.getInputStream().readObject();
trace.info ("topic partitions from checkpoint = " + partitions);
// only assign, fetch offset is last committed offset.
assign (partitions);
@@ -259,79 +239,111 @@ protected void processCheckpointEvent (Checkpoint checkpoint) {
/**
- * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processUpdateAssignmentEvent(com.ibm.streamsx.kafka.clients.consumer.TopicPartitionUpdate)
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#supports(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
*/
@Override
- protected void processUpdateAssignmentEvent(TopicPartitionUpdate update) {
+ public boolean supports (ControlPortAction action) {
+ switch (action.getActionType()) {
+ case ADD_ASSIGNMENT:
+ case REMOVE_ASSIGNMENT:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+
+ /**
+ * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processControlPortActionEvent(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
+ */
+ @Override
+ protected void processControlPortActionEvent(ControlPortAction action) {
try {
+ final ControlPortActionType actionType = action.getActionType();
+ if (actionType == ControlPortActionType.ADD_ASSIGNMENT || actionType == ControlPortActionType.REMOVE_ASSIGNMENT) {
+ trace.info ("action: " + action);
+ } else if (trace.isDebugEnabled()) {
+ trace.debug ("action: " + action);
+ }
// create a map of current topic partitions and their fetch offsets for next record
Map<TopicPartition, Long> currentTopicPartitionOffsets = new HashMap<TopicPartition, Long>();
Set<TopicPartition> topicPartitions = getConsumer().assignment();
topicPartitions.forEach(tp -> currentTopicPartitionOffsets.put(tp, getConsumer().position(tp)));
- switch (update.getAction()) {
- case ADD:
- update.getTopicPartitionOffsetMap().forEach((tp, offset) -> {
+ boolean doNewAssign = false;
+ switch (actionType) {
+ case ADD_ASSIGNMENT:
+ action.getTopicPartitionOffsetMap().forEach((tp, offset) -> {
// offset can be -2, -1, or a valid offset o >= 0
// -2 means 'seek to beginning', -1 means 'seek to end'
currentTopicPartitionOffsets.put(tp, offset);
});
- assignToPartitionsWithOffsets (currentTopicPartitionOffsets);
- trace.info ("assigned partitions after ADD: " + currentTopicPartitionOffsets);
- // No need to update offset manager here, like adding topics, etc.
- // Missing topics in the offset manager are auto-created
- CommitInfo commits = new CommitInfo (true, false);
- // Immediately commit the fetch offsets of _only_the_added_ topic partitions
- update.getTopicPartitionOffsetMap().forEach((tp, offset) -> {
- // do not put 'offset' into the commits; 'offset' can be -1 or -2 for 'end' or 'begin'
- commits.put(tp, getConsumer().position (tp));
- });
- commitOffsets (commits);
- trace.info ("committed offsets of the added topic partitions: " + commits);
+ doNewAssign = currentTopicPartitionOffsets.size() > 0;
+ if (!doNewAssign) {
+ trace.info ("topic partition assignment unchanged: " + currentTopicPartitionOffsets);
+ } else {
+ assignToPartitionsWithOffsets (currentTopicPartitionOffsets);
+ trace.info ("assigned partitions after ADD: " + currentTopicPartitionOffsets);
+ // No need to update offset manager here, like adding topics, etc.
+ // Missing topics in the offset manager are auto-created
+ CommitInfo commits = new CommitInfo (true, false);
+ // Immediately commit the fetch offsets of _only_the_added_ topic partitions
+ action.getTopicPartitionOffsetMap().forEach((tp, offset) -> {
+ // do not put 'offset' into the commits; 'offset' can be -1 or -2 for 'end' or 'begin'
+ commits.put(tp, getConsumer().position (tp));
+ });
+ commitOffsets (commits);
+ trace.info ("committed offsets of the added topic partitions: " + commits);
+ }
break;
- case REMOVE:
+ case REMOVE_ASSIGNMENT:
// x 1. remove messages of the removed topic partitions from the queue - they are all uncommitted
// x 2. wait that the queue gets processed - awaitMessageQueueProcessed();
// x 3. commit the offsets of the removed topic partitions
// x 4. remove the unassigned topic partitions from the offsetManager (or simply clear?)
// x 5. update the partition assignment in the consumer
// remove messages of removed topic partitions from the message queue
- getMessageQueue().removeIf (record -> belongsToPartition (record, update.getTopicPartitionOffsetMap().keySet()));
+ getMessageQueue().removeIf (record -> belongsToPartition (record, action.getTopicPartitionOffsetMap().keySet()));
awaitMessageQueueProcessed();
// now the offset manager can be cleaned without the chance that the removed partition(s) re-appear after tuple submission
// remove removed partitions from offset manager. We can't commit offsets for those partitions we are not assigned any more.
// the post-condition is, that all messages from the queue have submitted as
// tuples and its offsets +1 are stored in OffsetManager.
-
+
final boolean commitSync = true;
final boolean commitPartitionWise = false;
CommitInfo commitOffsets = new CommitInfo (commitSync, commitPartitionWise);
OffsetManager offsetManager = getOffsetManager();
-
+
synchronized (offsetManager) {
- update.getTopicPartitionOffsetMap().forEach ((tp, offsetIrrelevant) -> {
+ for (TopicPartition tp: action.getTopicPartitionOffsetMap().keySet()) {
// make sure that we commit only partitions that are assigned
if (currentTopicPartitionOffsets.containsKey (tp)) {
+ doNewAssign = true;
long offset = offsetManager.getOffset (tp.topic(), tp.partition());
// offset is -1 if there is no mapping from topic partition to offset
if (offset >= 0) commitOffsets.put (tp, offset);
currentTopicPartitionOffsets.remove (tp);
}
offsetManager.remove (tp.topic(), tp.partition());
- });
+ }
}
if (!commitOffsets.isEmpty()) {
commitOffsets (commitOffsets);
}
// we can end up here with an empty map after removal of assignments.
- assignToPartitionsWithOffsets (currentTopicPartitionOffsets);
- trace.info ("assigned partitions after REMOVE: " + currentTopicPartitionOffsets);
+ if (doNewAssign) {
+ assignToPartitionsWithOffsets (currentTopicPartitionOffsets);
+ trace.info ("assigned partitions after REMOVE: " + currentTopicPartitionOffsets);
+ } else {
+ trace.info ("topic partition assignment unchanged: " + currentTopicPartitionOffsets);
+ }
break;
default:
- throw new Exception ("processUpdateAssignmentEvent(): unimplemented action: " + update.getAction());
+ throw new Exception ("processControlPortActionEvent(): unimplemented action: " + actionType);
}
// getChkptContext().getKind() is not reported properly. Streams Build 20180710104900 (4.3.0.0) never returns OPERATOR_DRIVEN
- if (getCheckpointKind() == Kind.OPERATOR_DRIVEN) {
+ if (doNewAssign && getCheckpointKind() == Kind.OPERATOR_DRIVEN) {
trace.info ("initiating checkpointing with current partition assignment");
// createCheckpoint() throws IOException
boolean result = getChkptContext().createCheckpoint();
@@ -356,10 +368,11 @@ public void onCheckpoint (Checkpoint checkpoint) throws InterruptedException {
if (getCheckpointKind() == Kind.OPERATOR_DRIVEN) {
try {
// do not send an event here. In case of operator driven checkpoint it will never be processed (deadlock)
- checkpoint.getOutputStream().writeObject (getAssignedPartitions());
+ final ObjectOutputStream outputStream = checkpoint.getOutputStream();
+ outputStream.writeObject (getAssignedPartitions());
trace.info ("topic partitions written into checkpoint: " + getAssignedPartitions());
} catch (IOException e) {
- throw new RuntimeException (e.getLocalizedMessage(), e);
+ throw new RuntimeException (e.getMessage(), e);
}
}
else {
@@ -390,12 +403,10 @@ public void onReset (Checkpoint checkpoint) throws InterruptedException {
}
-
-
/**
* The builder for the consumer client following the builder pattern.
*/
- public static class Builder {
+ public static class Builder implements ConsumerClientBuilder {
private OperatorContext operatorContext;
private Class> keyClass;
@@ -413,7 +424,8 @@ public final Builder setOperatorContext(OperatorContext c) {
}
public final Builder setKafkaProperties(KafkaOperatorProperties p) {
- this.kafkaProperties = p;
+ this.kafkaProperties = new KafkaOperatorProperties();
+ this.kafkaProperties.putAll (p);
return this;
}
@@ -452,8 +464,15 @@ public final Builder setInitialStartPosition (StartPosition p) {
return this;
}
+ @Override
public ConsumerClient build() throws Exception {
- NonCrKafkaConsumerClient client = new NonCrKafkaConsumerClient (operatorContext, keyClass, valueClass, kafkaProperties);
+ KafkaOperatorProperties p = new KafkaOperatorProperties();
+ p.putAll (this.kafkaProperties);
+ if (p.containsKey (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG)) {
+ p.remove (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
+ trace.warn (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG + " removed from consumer configuration (group management disabled)");
+ }
+ NonCrKafkaConsumerClient client = new NonCrKafkaConsumerClient (operatorContext, keyClass, valueClass, p);
client.setPollTimeout (pollTimeout);
client.setCommitMode (commitMode);
client.setCommitCount (commitCount);
@@ -461,5 +480,10 @@ public ConsumerClient build() throws Exception {
client.setInitialStartPosition (initialStartPosition);
return client;
}
+
+ @Override
+ public int getImplementationMagic() {
+ return NonCrKafkaConsumerClient.class.getName().hashCode();
+ }
}
}
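Because this client works with group management disabled, its builder now copies the supplied properties and strips group.instance.id. A hedged sketch of the observable effect, with a made-up member id and an assumed OperatorContext 'context':

    KafkaOperatorProperties props = new KafkaOperatorProperties();
    props.put (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "static-member-1");  // hypothetical value
    ConsumerClient client = new NonCrKafkaConsumerClient.Builder()
            .setOperatorContext (context)
            .setKafkaProperties (props)          // builder copies; caller's map is not mutated
            .build();                            // warns and builds without group.instance.id
    assert props.containsKey (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);      // original unchanged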
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/NonCrKafkaConsumerGroupClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/NonCrKafkaConsumerGroupClient.java
index 3b190a51..596828e3 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/NonCrKafkaConsumerGroupClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/NonCrKafkaConsumerGroupClient.java
@@ -13,7 +13,10 @@
*/
package com.ibm.streamsx.kafka.clients.consumer;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
import java.util.Collection;
+import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
@@ -26,9 +29,10 @@
import com.ibm.streams.operator.OperatorContext;
import com.ibm.streams.operator.state.Checkpoint;
-import com.ibm.streamsx.kafka.Features;
+import com.ibm.streams.operator.state.CheckpointContext.Kind;
import com.ibm.streamsx.kafka.KafkaConfigurationException;
import com.ibm.streamsx.kafka.KafkaOperatorException;
+import com.ibm.streamsx.kafka.KafkaOperatorResetFailedException;
import com.ibm.streamsx.kafka.KafkaOperatorRuntimeException;
import com.ibm.streamsx.kafka.MsgFormatter;
import com.ibm.streamsx.kafka.clients.OffsetManager;
@@ -59,6 +63,7 @@ public class NonCrKafkaConsumerGroupClient extends AbstractNonCrKafkaConsumerCli
private NonCrKafkaConsumerGroupClient (OperatorContext operatorContext, Class<K> keyClass, Class<V> valueClass,
KafkaOperatorProperties kafkaProperties, boolean singleTopic) throws KafkaOperatorException {
super (operatorContext, keyClass, valueClass, kafkaProperties);
+ isGroupManagementActive.setValue (1L);
// if no partition assignment strategy is specified, set the round-robin when multiple topics can be subscribed
if (!(singleTopic || kafkaProperties.containsKey (ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG))) {
@@ -71,6 +76,22 @@ private NonCrKafkaConsumerGroupClient (OperatorContext operatorContext, C
}
}
+
+ /**
+ * @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#supports(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
+ */
+ @Override
+ public boolean supports (ControlPortAction action) {
+ switch (action.getActionType()) {
+ case ADD_SUBSCRIPTION:
+ case REMOVE_SUBSCRIPTION:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+
/**
* Subscription with pattern not supported by this client implementation.
* @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#subscribeToTopicsWithTimestamp(java.util.regex.Pattern, long)
@@ -78,14 +99,12 @@ private NonCrKafkaConsumerGroupClient (OperatorContext operatorContext, C
@Override
public void subscribeToTopicsWithTimestamp (Pattern pattern, long timestamp) throws Exception {
trace.info (MsgFormatter.format ("subscribeToTopicsWithTimestamp: pattern = {0}, timestamp = {1}",
- pattern == null? "null": pattern.pattern(), timestamp));
+ pattern == null? "null": pattern.pattern(), timestamp));
assert getInitialStartPosition() == StartPosition.Time;
this.initialStartTimestamp = timestamp;
subscribe (pattern, this);
// we seek in onPartitionsAssigned()
- if (Features.ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION) {
- testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, StartPosition.Time);
- }
+ testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, StartPosition.Time);
resetCommitPeriod (System.currentTimeMillis());
}
@@ -102,7 +121,7 @@ public void subscribeToTopics (Pattern pattern, StartPosition startPosition) thr
assert getInitialStartPosition() == startPosition;
subscribe (pattern, this);
// we seek in onPartitionsAssigned()
- if (startPosition != StartPosition.Default && Features.ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION) {
+ if (startPosition != StartPosition.Default) {
testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, startPosition);
}
resetCommitPeriod (System.currentTimeMillis());
@@ -133,7 +152,7 @@ public void subscribeToTopics (Collection topics, Collection pa
}
subscribe (topics, this);
// we seek in onPartitionsAssigned()
- if (startPosition != StartPosition.Default && Features.ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION) {
+ if (startPosition != StartPosition.Default) {
testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, startPosition);
}
resetCommitPeriod (System.currentTimeMillis());
@@ -164,9 +183,7 @@ public void subscribeToTopicsWithTimestamp (Collection topics, Collectio
this.initialStartTimestamp = timestamp;
subscribe (topics, this);
// we seek in onPartitionsAssigned()
- if (Features.ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION) {
- testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, StartPosition.Time);
- }
+ testForJobControlPlaneOrThrow (JCP_CONNECT_TIMEOUT_MILLIS, StartPosition.Time);
resetCommitPeriod (System.currentTimeMillis());
}
@@ -251,19 +268,11 @@ public void onPartitionsAssigned (Collection partitions) {
break;
case Beginning:
case End:
- if (!Features.ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION) {
- // here we must never end when the feature is not enabled
- throw new KafkaOperatorRuntimeException ("Illegal startposition for this consumer client implementation: " + startPos);
- }
if (!isCommittedForPartition (tp)) {
seekToPosition (tp, startPos);
}
break;
case Time:
- if (!Features.ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION) {
- // here we must never end when the feature is not enabled
- throw new KafkaOperatorRuntimeException ("Illegal startposition for this consumer client implementation: " + startPos);
- }
if (!isCommittedForPartition (tp)) {
seekToTimestamp (tp, this.initialStartTimestamp);
}
@@ -292,35 +301,156 @@ public void onPartitionsAssigned (Collection partitions) {
* This method should not be called because operator control port and this client implementation are incompatible.
* A context check should exist to detect this mis-configuration.
* We only log the method call.
- * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processUpdateAssignmentEvent(com.ibm.streamsx.kafka.clients.consumer.TopicPartitionUpdate)
+ * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processControlPortActionEvent(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
*/
@Override
- protected void processUpdateAssignmentEvent (TopicPartitionUpdate update) {
- trace.error("processUpdateAssignmentEvent(): update = " + update + "; update of assignments not supported by this client: " + getThisClassName());
+ protected void processControlPortActionEvent (ControlPortAction action) {
+ try {
+ final ControlPortActionType actionType = action.getActionType();
+ if (actionType == ControlPortActionType.ADD_SUBSCRIPTION || actionType == ControlPortActionType.REMOVE_SUBSCRIPTION) {
+ trace.info ("action: " + action);
+ } else if (trace.isDebugEnabled()) {
+ trace.debug ("action: " + action);
+ }
+
+ final Set<String> oldSubscription = getConsumer().subscription();
+ final Set<String> newSubscription = new HashSet<>(oldSubscription);
+ trace.info ("current topic subscription: " + newSubscription);
+ boolean subscriptionChanged = false;
+ switch (actionType) {
+ case ADD_SUBSCRIPTION:
+ action.getTopics().forEach (tpc -> {
+ newSubscription.add (tpc);
+ });
+ break;
+ case REMOVE_SUBSCRIPTION:
+ action.getTopics().forEach (tpc -> {
+ newSubscription.remove (tpc);
+ });
+ break;
+ default:
+ throw new Exception ("processControlPortActionEvent(): unimplemented action: " + actionType);
+ }
+ subscriptionChanged = !newSubscription.equals (oldSubscription);
+ if (!subscriptionChanged) {
+ trace.info("subscriptiopn has not changed: " + newSubscription);
+ } else {
+ if (newSubscription.isEmpty()) {
+ // no partition rebalance will happen, which is where we usually commit offsets. Commit now.
+ // remove the content of the queue. It contains uncommitted messages.
+ getMessageQueue().clear();
+ OffsetManager offsetManager = getOffsetManager();
+ try {
+ awaitMessageQueueProcessed();
+ // the post-condition is that all messages from the queue have been submitted as
+ // tuples and their offsets +1 are stored in the OffsetManager.
+ final boolean commitSync = true;
+ final boolean commitPartitionWise = false;
+ CommitInfo offsets = new CommitInfo (commitSync, commitPartitionWise);
+ synchronized (offsetManager) {
+ Set<TopicPartition> partitionsInOffsetManager = offsetManager.getMappedTopicPartitions();
+ Set<TopicPartition> currentAssignment = getAssignedPartitions();
+ for (TopicPartition tp: partitionsInOffsetManager) {
+ if (currentAssignment.contains (tp)) {
+ offsets.put (tp, offsetManager.getOffset (tp.topic(), tp.partition()));
+ }
+ }
+ }
+ if (!offsets.isEmpty()) {
+ commitOffsets (offsets);
+ }
+ // reset the counter for periodic commit
+ resetCommitPeriod (System.currentTimeMillis());
+ }
+ catch (InterruptedException | RuntimeException e) {
+ // Ignore InterruptedException; a RuntimeException from commitOffsets is already traced.
+ }
+ offsetManager.clear();
+ }
+ subscribe (newSubscription, this);
+ // getChkptContext().getKind() is not reported properly. Streams Build 20180710104900 (4.3.0.0) never returns OPERATOR_DRIVEN
+ if (getCheckpointKind() == Kind.OPERATOR_DRIVEN) {
+ trace.info ("initiating checkpointing with current topic subscription");
+ // createCheckpoint() throws IOException
+ boolean result = getChkptContext().createCheckpoint();
+ trace.info ("createCheckpoint() result: " + result);
+ }
+ }
+ } catch (Exception e) {
+ trace.error(e.getLocalizedMessage(), e);
+ throw new KafkaOperatorRuntimeException (e.getMessage(), e);
+ }
}
/**
- * Empty default implementation which ensures that 'config checkpoint' is at least ignored
+ * Checkpoints the current subscription of the consumer.
* @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#onCheckpoint(com.ibm.streams.operator.state.Checkpoint)
*/
@Override
public void onCheckpoint (Checkpoint checkpoint) throws InterruptedException {
+ if (getOperatorContext().getNumberOfStreamingInputs() == 0 || !isCheckpointEnabled()) {
+ trace.debug ("onCheckpoint() - ignored");
+ return;
+ }
+ trace.log (DEBUG_LEVEL, "onCheckpoint() - entering. seq = " + checkpoint.getSequenceId());
+ if (getCheckpointKind() == Kind.OPERATOR_DRIVEN) {
+ try {
+ // do not send an event here. In case of operator driven checkpoint it will never be processed (deadlock)
+ final ObjectOutputStream outputStream = checkpoint.getOutputStream();
+ final Set<String> subscription = getConsumer().subscription();
+ outputStream.writeObject (subscription);
+ trace.info ("topics written into checkpoint: " + subscription);
+ } catch (IOException e) {
+ throw new RuntimeException (e.getMessage(), e);
+ }
+ }
+ else {
+ // periodic checkpoint - create the checkpoint by the event thread
+ sendStopPollingEvent();
+ Event event = new Event (Event.EventType.CHECKPOINT, checkpoint, true);
+ sendEvent (event);
+ event.await();
+ if (isSubscribedOrAssigned()) sendStartPollingEvent();
+ }
}
+
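The two branches above differ only in who performs the write: an operator-driven checkpoint is written inline, because a CHECKPOINT event would never be serviced while the event thread is blocked (deadlock), whereas a periodic checkpoint is handed to the event thread and awaited. A stripped-down sketch of such an awaitable event, assuming a CountDownLatch-based handshake (the toolkit's Event class is richer; the names here are illustrative):

    import java.util.concurrent.CountDownLatch;

    final class AwaitableEvent {
        private final CountDownLatch done = new CountDownLatch (1);
        private final Runnable work;

        AwaitableEvent (Runnable work) { this.work = work; }

        // invoked on the event thread when the event is dequeued
        void process() {
            try { work.run(); } finally { done.countDown(); }
        }

        // invoked on the submitting thread; returns when process() has finished
        void await() throws InterruptedException { done.await(); }
    }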
/**
* Empty default implementation which ensures that 'config checkpoint' is at least ignored
* @see com.ibm.streamsx.kafka.clients.consumer.ConsumerClient#onReset(com.ibm.streams.operator.state.Checkpoint)
*/
@Override
- public void onReset(Checkpoint checkpoint) throws InterruptedException {
+ public void onReset (Checkpoint checkpoint) throws InterruptedException {
+ trace.info ("onReset() - entering. seq = " + checkpoint.getSequenceId());
+ if (getOperatorContext().getNumberOfStreamingInputs() == 0 || !isCheckpointEnabled()) {
+ trace.debug ("onReset() - ignored");
+ return;
+ }
+ sendStopPollingEvent();
+ Event event = new Event (Event.EventType.RESET, checkpoint, true);
+ sendEvent (event);
+ event.await();
+ // do not start polling; reset happens before allPortsReady(), which starts polling
}
/**
- * Empty default implementation which ensures that 'config checkpoint' is at least ignored
+ * Resets the client by restoring the checkpointed subscription.
* @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processResetEvent(Checkpoint)
*/
@Override
+ @SuppressWarnings("unchecked")
protected void processResetEvent (Checkpoint checkpoint) {
+ final long chkptSeqId = checkpoint.getSequenceId();
+ trace.log (DEBUG_LEVEL, "processResetEvent() - entering. seq = " + chkptSeqId);
+ try {
+ final Set<String> topics = (Set<String>) checkpoint.getInputStream().readObject();
+ trace.info ("topics from checkpoint = " + topics);
+ // subscribe, fetch offset is last committed offset.
+ subscribe (topics, this);
+ } catch (IllegalStateException | ClassNotFoundException | IOException e) {
+ trace.error ("reset failed: " + e.getLocalizedMessage());
+ throw new KafkaOperatorResetFailedException (MsgFormatter.format ("resetting operator {0} to checkpoint sequence ID {1,number,#} failed: {2}", getOperatorContext().getName(), chkptSeqId, e.getLocalizedMessage()), e);
+ }
}
/**
@@ -329,16 +459,22 @@ protected void processResetEvent (Checkpoint checkpoint) {
*/
@Override
protected void processCheckpointEvent (Checkpoint checkpoint) {
+ try {
+ final Set<String> subscription = getConsumer().subscription();
+ checkpoint.getOutputStream().writeObject (subscription);
+ trace.log (DEBUG_LEVEL, "topics written into checkpoint: " + subscription);
+ } catch (IOException e) {
+ throw new RuntimeException (e.getLocalizedMessage(), e);
+ }
}
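Both the checkpoint and the reset path move nothing more than the Set<String> of subscribed topics through Java serialization. Outside of Streams, the same round trip looks like the following sketch, with plain object streams standing in for the Checkpoint API:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.util.HashSet;
    import java.util.Set;

    public class SubscriptionRoundTrip {
        public static void main (String[] args) throws Exception {
            Set<String> subscription = new HashSet<>();
            subscription.add ("topic1");
            subscription.add ("topic2");

            // write side: what processCheckpointEvent() does with checkpoint.getOutputStream()
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream (buf)) {
                out.writeObject (subscription);
            }

            // read side: what processResetEvent() does before re-subscribing
            try (ObjectInputStream in = new ObjectInputStream (new ByteArrayInputStream (buf.toByteArray()))) {
                @SuppressWarnings("unchecked")
                Set<String> restored = (Set<String>) in.readObject();
                System.out.println ("restored subscription: " + restored);
            }
        }
    }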
-
/**
* The builder for the consumer client following the builder pattern.
*/
- public static class Builder {
+ public static class Builder implements ConsumerClientBuilder {
private OperatorContext operatorContext;
private Class<?> keyClass;
@@ -356,8 +492,9 @@ public final Builder setOperatorContext(OperatorContext c) {
return this;
}
- public final Builder setKafkaProperties(KafkaOperatorProperties p) {
- this.kafkaProperties = p;
+ public final Builder setKafkaProperties (KafkaOperatorProperties p) {
+ this.kafkaProperties = new KafkaOperatorProperties();
+ this.kafkaProperties.putAll (p);
return this;
}
@@ -401,8 +538,11 @@ public final Builder setInitialStartPosition (StartPosition p) {
return this;
}
+ @Override
public ConsumerClient build() throws Exception {
- NonCrKafkaConsumerGroupClient client = new NonCrKafkaConsumerGroupClient (operatorContext, keyClass, valueClass, kafkaProperties, singleTopic);
+ KafkaOperatorProperties p = new KafkaOperatorProperties();
+ p.putAll (this.kafkaProperties);
+ NonCrKafkaConsumerGroupClient client = new NonCrKafkaConsumerGroupClient (operatorContext, keyClass, valueClass, p, singleTopic);
client.setPollTimeout (pollTimeout);
client.setCommitMode (commitMode);
client.setCommitCount (commitCount);
@@ -410,5 +550,10 @@ public ConsumerClient build() throws Exception {
client.setInitialStartPosition (initialStartPosition);
return client;
}
+
+ @Override
+ public int getImplementationMagic() {
+ return NonCrKafkaConsumerGroupClient.class.getName().hashCode();
+ }
}
}
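setKafkaProperties() and build() now copy the configuration instead of keeping the caller's reference, so later mutations on the caller's side cannot leak into a client that has already been built. The effect, reduced to plain java.util.Properties:

    import java.util.Properties;

    public class DefensiveCopy {
        public static void main (String[] args) {
            Properties callerProps = new Properties();
            callerProps.setProperty ("group.id", "g1");

            // builder-style defensive copy, as in setKafkaProperties()
            Properties builderProps = new Properties();
            builderProps.putAll (callerProps);

            // the caller mutates its own object afterwards ...
            callerProps.setProperty ("group.id", "g2");

            // ... but the copied configuration is unaffected: prints g1
            System.out.println (builderProps.getProperty ("group.id"));
        }
    }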
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/TopicPartitionUpdate.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/TopicPartitionUpdate.java
deleted file mode 100644
index 332b471b..00000000
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/consumer/TopicPartitionUpdate.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.ibm.streamsx.kafka.clients.consumer;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-
-import org.apache.kafka.common.TopicPartition;
-
-import com.google.gson.Gson;
-import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.ibm.streamsx.kafka.TopicPartitionUpdateParseException;
-import com.ibm.streamsx.kafka.i18n.Messages;
-
-public class TopicPartitionUpdate {
- private final static Gson gson = new Gson();
-
- private final TopicPartitionUpdateAction action;
- private final Map<TopicPartition, Long> topicPartitionOffsetMap;
-
- public TopicPartitionUpdate(TopicPartitionUpdateAction action, Map<TopicPartition, Long> topicPartitionOffsetMap) {
- this.action = action;
- this.topicPartitionOffsetMap = topicPartitionOffsetMap;
- }
-
- public TopicPartitionUpdateAction getAction() {
- return action;
- }
-
- public Map<TopicPartition, Long> getTopicPartitionOffsetMap() {
- return topicPartitionOffsetMap;
- }
-
- @Override
- public String toString() {
- return "TopicPartitionUpdate [action=" + action + ", topicPartitionOffsetMap=" + topicPartitionOffsetMap + "]";
- }
-
- /**
- * Creates an Topic partition update from a JSON formatted String
- * @param json The JSON string
- * @return a TopicPartitionUpdate object
- * @throws TopicPartitionUpdateParseException parsing JSON failed
- */
- public static TopicPartitionUpdate fromJSON (String json) throws TopicPartitionUpdateParseException {
- JsonObject jsonObj = null;
- try {
- jsonObj = gson.fromJson (json, JsonObject.class);
- }
- catch (Exception e) {
- TopicPartitionUpdateParseException exc = new TopicPartitionUpdateParseException (e.getMessage(), e);
- exc.setJson (json);
- throw exc;
- }
-
- if (jsonObj == null) {
- TopicPartitionUpdateParseException exc = new TopicPartitionUpdateParseException (Messages.getString("INVALID_JSON_MISSING_KEY", "action", json==null? "null": json));
- exc.setJson(json);
- throw exc;
- }
- TopicPartitionUpdateAction action = null;
- if (jsonObj.has ("action")) { //$NON-NLS-1$
- try {
- action = TopicPartitionUpdateAction.valueOf (jsonObj.get ("action").getAsString().toUpperCase()); //$NON-NLS-1$
- }
- catch (Exception e) {
- TopicPartitionUpdateParseException exc = new TopicPartitionUpdateParseException (e.getMessage(), e);
- exc.setJson (json);
- throw exc;
- }
- } else {
- TopicPartitionUpdateParseException exc = new TopicPartitionUpdateParseException (Messages.getString("INVALID_JSON_MISSING_KEY", "action", json));
- exc.setJson (json);
- throw exc;
- }
-
- Map<TopicPartition, Long> topicPartitionOffsetMap = new HashMap<>();
- if (jsonObj.has ("topicPartitionOffsets")) { //$NON-NLS-1$
- JsonArray arr = jsonObj.get ("topicPartitionOffsets").getAsJsonArray(); //$NON-NLS-1$
- Iterator<JsonElement> it = arr.iterator();
- while (it.hasNext()) {
- JsonObject tpo = it.next().getAsJsonObject();
- if(!tpo.has ("topic")) { //$NON-NLS-1$
- TopicPartitionUpdateParseException exc = new TopicPartitionUpdateParseException (Messages.getString("INVALID_JSON_MISSING_KEY", "topic", json));
- exc.setJson (json);
- throw exc;
- }
-
- if(!tpo.has("partition")) { //$NON-NLS-1$
- TopicPartitionUpdateParseException exc = new TopicPartitionUpdateParseException (Messages.getString("INVALID_JSON_MISSING_KEY", "partition", json));
- exc.setJson (json);
- throw exc;
- }
- try {
- String topic = tpo.get ("topic").getAsString(); //$NON-NLS-1$
- int partition = tpo.get ("partition").getAsInt(); //$NON-NLS-1$
- long offset = tpo.has ("offset")? tpo.get ("offset").getAsLong(): OffsetConstants.NO_SEEK; //$NON-NLS-1$ //$NON-NLS-2$
- topicPartitionOffsetMap.put (new TopicPartition (topic, partition), new Long (offset));
- }
- catch (Exception e) {
- // Handle Number format errors
- TopicPartitionUpdateParseException exc = new TopicPartitionUpdateParseException (e.getMessage(), e);
- exc.setJson (json);
- throw exc;
- }
- }
- }
- return new TopicPartitionUpdate (action, topicPartitionOffsetMap);
- }
-}
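For reference, the removed parser accepted control-port JSON of the shape shown below; the offset was optional and defaulted to OffsetConstants.NO_SEEK. A minimal Gson walk over that shape (the action value is illustrative, and -1 stands in for the NO_SEEK constant):

    import com.google.gson.Gson;
    import com.google.gson.JsonObject;

    public class TopicPartitionUpdateJson {
        public static void main (String[] args) {
            String json = "{ \"action\": \"ADD\","
                    + " \"topicPartitionOffsets\": ["
                    + " { \"topic\": \"t1\", \"partition\": 0, \"offset\": 100 },"
                    + " { \"topic\": \"t1\", \"partition\": 1 } ] }";
            JsonObject obj = new Gson().fromJson (json, JsonObject.class);
            System.out.println ("action = " + obj.get ("action").getAsString());
            obj.get ("topicPartitionOffsets").getAsJsonArray().forEach (e -> {
                JsonObject tpo = e.getAsJsonObject();
                long offset = tpo.has ("offset")? tpo.get ("offset").getAsLong(): -1L;
                System.out.println (tpo.get ("topic").getAsString() + "-"
                        + tpo.get ("partition").getAsInt() + " @ " + offset);
            });
        }
    }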
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/KafkaProducerClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/AbstractKafkaProducerClient.java
similarity index 86%
rename from com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/KafkaProducerClient.java
rename to com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/AbstractKafkaProducerClient.java
index 24ea2b82..d6b34f76 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/KafkaProducerClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/AbstractKafkaProducerClient.java
@@ -40,9 +40,9 @@
import com.ibm.streamsx.kafka.clients.metrics.MetricsUpdatedListener;
import com.ibm.streamsx.kafka.properties.KafkaOperatorProperties;
-public abstract class KafkaProducerClient extends AbstractKafkaClient {
+public abstract class AbstractKafkaProducerClient extends AbstractKafkaClient {
- private static final Logger logger = Logger.getLogger(KafkaProducerClient.class);
+ private static final Logger logger = Logger.getLogger(AbstractKafkaProducerClient.class);
public static final int CLOSE_TIMEOUT_MS = 5000;
protected KafkaProducer<?, ?> producer;
@@ -134,7 +134,7 @@ public void afterCustomMetricsUpdated() {
public void beforeCustomMetricsUpdated() { }
}
- public KafkaProducerClient (OperatorContext operatorContext, Class<?> keyClass, Class<?> valueClass,
+ public AbstractKafkaProducerClient (OperatorContext operatorContext, Class<?> keyClass, Class<?> valueClass,
boolean guaranteeRecordOrder,
KafkaOperatorProperties kafkaProperties) throws Exception {
super (operatorContext, kafkaProperties, false);
@@ -167,7 +167,7 @@ protected final synchronized void createProducer() {
metricsFetcher = new MetricsFetcher (getOperatorContext(), new MetricsProvider() {
@Override
public Map<MetricName, ? extends Metric> getMetrics() {
- synchronized (KafkaProducerClient.this) {
+ synchronized (AbstractKafkaProducerClient.this) {
return producer.metrics();
}
}
@@ -266,20 +266,52 @@ protected void configureProperties() throws Exception {
if (!kafkaProperties.containsKey (ProducerConfig.LINGER_MS_CONFIG)) {
this.kafkaProperties.put (ProducerConfig.LINGER_MS_CONFIG, "100");
}
- // max.in.flight.requests.per.connection
- // when record order is to be kept and retries are enabled, max.in.flight.requests.per.connection must be 1
- final long retries = kafkaProperties.containsKey (ProducerConfig.RETRIES_CONFIG)? Long.parseLong (this.kafkaProperties.getProperty (ProducerConfig.RETRIES_CONFIG).trim()): Integer.MAX_VALUE;
- final String maxInFlightRequestsPerConWhenUnset = guaranteeOrdering && retries > 0l? "1": "10";
+
+ // when record order is to be kept, use the idempotent producer
+ if (guaranteeOrdering) {
+ kafkaProperties.setProperty (ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
+ logger.info ("producer config '" + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "' has been set to 'true' to guarantee record ordering. Adjusting also 'acks' to 'all' and 'retries' to be > 0.");
+ }
+ final boolean enableIdempotence = Boolean.parseBoolean (kafkaProperties.getProperty (ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false").trim());
+ if (enableIdempotence) {
+ // --- begin adjustment for enable.idempotence = true
+ // Note that enabling idempotence requires max.in.flight.requests.per.connection
+ // to be less than or equal to 5, retries to be greater than 0 and acks must be 'all'.
+ if (kafkaProperties.containsKey (ProducerConfig.ACKS_CONFIG)) {
+ final String acks = kafkaProperties.getProperty (ProducerConfig.ACKS_CONFIG);
+ if (!(acks.equals("all") || acks.equals("-1"))) {
+ logger.warn (MsgFormatter.format ("producer config ''{0}'' has been changed from {1} to {2} for enable.idempotence=true.",
+ ProducerConfig.ACKS_CONFIG, acks, "all"));
+ kafkaProperties.setProperty (ProducerConfig.ACKS_CONFIG, "all");
+ }
+ }
+ else this.kafkaProperties.setProperty (ProducerConfig.ACKS_CONFIG, "all");
+ if (kafkaProperties.containsKey (ProducerConfig.RETRIES_CONFIG)) {
+ final long retries = Long.parseLong (kafkaProperties.getProperty (ProducerConfig.RETRIES_CONFIG).trim());
+ if (retries < 1L) {
+ final String retriesAdjustGreaterThan0 = "1";
+ logger.warn (MsgFormatter.format ("producer config ''{0}'' has been changed from {1,number,#} to {2} for enable.idempotence=true.",
+ ProducerConfig.RETRIES_CONFIG, retries, retriesAdjustGreaterThan0));
+ this.kafkaProperties.setProperty (ProducerConfig.RETRIES_CONFIG, retriesAdjustGreaterThan0);
+ }
+ }
+ }
+
+ final long maxInFlightRequestsPerConWhenUnset = enableIdempotence? 5L: 10L;
if (!kafkaProperties.containsKey (ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) {
- this.kafkaProperties.put (ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, maxInFlightRequestsPerConWhenUnset);
+ // config unset
+ this.kafkaProperties.put (ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "" + maxInFlightRequestsPerConWhenUnset);
}
else {
+ // config set; adjust when too high for enable.idempotence=true
final long maxInFlightRequests = Long.parseLong (this.kafkaProperties.getProperty (ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION).trim());
- if (guaranteeOrdering && maxInFlightRequests > 1l && retries > 0l) {
- this.kafkaProperties.put (ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
- logger.warn("producer config '" + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "' has been turned to '1' for guaranteed retention of record order per topic partition.");
+ if (enableIdempotence && maxInFlightRequests > maxInFlightRequestsPerConWhenUnset) {
+ this.kafkaProperties.put (ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "" + maxInFlightRequestsPerConWhenUnset);
+ logger.warn (MsgFormatter.format ("producer config ''{0}'' has been turned from ''{1,number,#}'' to ''{2,number,#}'' for ''{3}''=true for guaranteed retention of record order per topic partition.",
+ ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, maxInFlightRequests, maxInFlightRequestsPerConWhenUnset, ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG));
}
}
+
// batch.size
if (!kafkaProperties.containsKey (ProducerConfig.BATCH_SIZE_CONFIG)) {
this.kafkaProperties.put (ProducerConfig.BATCH_SIZE_CONFIG, "32768");
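The adjustments above follow the constraints Kafka documents for enable.idempotence=true: acks must be all (or -1), retries must be greater than 0, and max.in.flight.requests.per.connection must not exceed 5. Condensed into one normalization pass over plain Properties (toolkit logging and MsgFormatter omitted):

    import java.util.Properties;

    public class IdempotenceNormalizer {
        static void normalize (Properties p, boolean guaranteeOrdering) {
            if (guaranteeOrdering) {
                p.setProperty ("enable.idempotence", "true");
            }
            if (!Boolean.parseBoolean (p.getProperty ("enable.idempotence", "false").trim())) {
                return;   // nothing to adjust for a non-idempotent producer
            }
            // acks must be 'all' or the equivalent '-1'
            final String acks = p.getProperty ("acks");
            if (acks == null || !(acks.equals ("all") || acks.equals ("-1"))) {
                p.setProperty ("acks", "all");
            }
            // retries must be > 0
            if (Long.parseLong (p.getProperty ("retries", "1").trim()) < 1L) {
                p.setProperty ("retries", "1");
            }
            // max.in.flight.requests.per.connection must be <= 5
            if (Long.parseLong (p.getProperty ("max.in.flight.requests.per.connection", "5").trim()) > 5L) {
                p.setProperty ("max.in.flight.requests.per.connection", "5");
            }
        }
    }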
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/TrackingProducerClient.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/TrackingProducerClient.java
index cfe4fa77..5df2365d 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/TrackingProducerClient.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/clients/producer/TrackingProducerClient.java
@@ -47,7 +47,7 @@
* @author IBM Kafka toolkit team
* @since toolkit version 2.2
*/
-public class TrackingProducerClient extends KafkaProducerClient implements ClientCallback {
+public class TrackingProducerClient extends AbstractKafkaProducerClient implements ClientCallback {
private static class RecoveryEvent {}
private static final Logger trace = Logger.getLogger (TrackingProducerClient.class);
@@ -130,7 +130,7 @@ public void run() {
/**
- * @see com.ibm.streamsx.kafka.clients.producer.KafkaProducerClient#close(long)
+ * @see com.ibm.streamsx.kafka.clients.producer.AbstractKafkaProducerClient#close(long)
*/
@Override
public void close(long timeoutMillis) {
@@ -268,7 +268,7 @@ private void awaitRecoveryEventAndRecover() throws InterruptedException {
}
/**
- * @see com.ibm.streamsx.kafka.clients.producer.KafkaProducerClient#processRecord(ProducerRecord, Tuple)
+ * @see com.ibm.streamsx.kafka.clients.producer.AbstractKafkaProducerClient#processRecord(ProducerRecord, Tuple)
*/
@Override
public void processRecord (ProducerRecord<?, ?> producerRecord, Tuple associatedTuple) throws Exception {
@@ -289,7 +289,7 @@ public void processRecord (ProducerRecord<?, ?> producerRecord, Tuple associated
/**
- * @see com.ibm.streamsx.kafka.clients.producer.KafkaProducerClient#processRecords(List, Tuple)
+ * @see com.ibm.streamsx.kafka.clients.producer.AbstractKafkaProducerClient#processRecords(List, Tuple)
*/
@Override
public void processRecords (List<ProducerRecord<?, ?>> records, Tuple associatedTuple) throws Exception {
@@ -409,7 +409,7 @@ public void tupleFailedTemporarily (long seqNumber, Exception exception) {
/**
* Tries to cancel all send requests that are not yet done.
*
- * @see com.ibm.streamsx.kafka.clients.producer.KafkaProducerClient#tryCancelOutstandingSendRequests(boolean)
+ * @see com.ibm.streamsx.kafka.clients.producer.AbstractKafkaProducerClient#tryCancelOutstandingSendRequests(boolean)
*/
@Override
public void tryCancelOutstandingSendRequests (boolean mayInterruptIfRunning) {
@@ -427,7 +427,7 @@ public void tryCancelOutstandingSendRequests (boolean mayInterruptIfRunning) {
/**
- * @see com.ibm.streamsx.kafka.clients.producer.KafkaProducerClient#drain()
+ * @see com.ibm.streamsx.kafka.clients.producer.AbstractKafkaProducerClient#drain()
*/
@Override
public void drain() throws Exception {
@@ -450,7 +450,7 @@ public void drain() throws Exception {
}
/**
- * @see com.ibm.streamsx.kafka.clients.producer.KafkaProducerClient#checkpoint(com.ibm.streams.operator.state.Checkpoint)
+ * @see com.ibm.streamsx.kafka.clients.producer.AbstractKafkaProducerClient#checkpoint(com.ibm.streams.operator.state.Checkpoint)
*/
@Override
public void checkpoint(Checkpoint checkpoint) throws Exception {
@@ -459,7 +459,7 @@ public void checkpoint(Checkpoint checkpoint) throws Exception {
/**
- * @see com.ibm.streamsx.kafka.clients.producer.KafkaProducerClient#reset(com.ibm.streams.operator.state.Checkpoint)
+ * @see com.ibm.streamsx.kafka.clients.producer.AbstractKafkaProducerClient#reset(com.ibm.streams.operator.state.Checkpoint)
*/
@Override
public void reset (Checkpoint checkpoint) throws Exception {
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages.properties
index e1f99457..3f88411f 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages.properties
@@ -5,10 +5,10 @@
#
TOPIC_NOT_SPECIFIED=CDIST2150E Topic has not been specified. Either the input schema must contain an attribute named "topic" or one of the following parameters must be specified: 'topicAttribute' or 'topic'.
-INPUT_ATTRIBUTE_NOT_FOUND=CDIST2151E Input attribute not found: "{0}".
+INPUT_ATTRIBUTE_NOT_FOUND=CDIST2151E Input attribute not found: ''{0}''.
PREVIOUS_BATCH_FAILED_TO_SEND=CDIST2152E Previous batch failed to send: {0}
INVALID_PARAMETER_VALUE_GT=CDIST2153E Invalid value for the ''{0}'' parameter: {1}. Valid values must be greater than {2}.
-OUTPUT_ATTRIBUTE_NOT_FOUND=CDIST2154E Output attribute not found: "{0}"
+OUTPUT_ATTRIBUTE_NOT_FOUND=CDIST2154E Output attribute not found in stream schema: ''{0}''
TRIGGER_PARAM_MISSING=CDIST2155E The 'triggerCount' parameter is not specified. The 'triggerCount' parameter must be specified when the trigger for the consistent region is operatorDriven.
ERROR_ACQUIRING_PERMIT=CDIST2156E Error acquiring permit: {0}
OUTPUT_MESSAGE_ATTRIBUTE_MISSING=CDIST2157E Either 'outputMessageAttributeName' parameter must specify an existing output attribute, or the output schema must contain an output attribute named "message".
@@ -17,11 +17,11 @@ APPLICATION_CONFIGURATION_NOT_FOUND=CDIST2159W Application configuration not fou
MESSAGE_ATTRIBUTE_NOT_FOUND=CDIST2160E Message has not been specified. Either the 'messageAttribute' parameter must be specified or the input schema must contain an attribute named "message".
OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E The {0} operator cannot be the start of a consistent region.
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E The 'partition' input attribute must have a type of "int32" when the 'partitionAttribute' parameter is not specified.
-UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E Unsupported type: "{0}" when setting attribute "{1}"
+UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E Unsupported type: ''{0}'' when setting attribute ''{1}''
START_TIME_PARAM_NOT_FOUND=CDIST2164E The 'startTime' parameter must be specified when the 'startPosition' parameter value is set to "Time".
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W The 'topic', 'pattern', 'partition' and 'startPositition' parameters are ignored when the input port is present.
+PARAMS_INCOMPATIBLE_WITH_INPUT_PORT=CDIST2165E The 'topic', 'pattern', 'partition' and 'startPosition' parameters cannot be used when the input port is present.
TOPIC_OR_INPUT_PORT=CDIST2166E The 'topic' or 'pattern' parameter must be specified when no input port is defined.
-INVALID_JSON_MISSING_KEY=CDIST2167E Invalid JSON! Missing \"{0}\" key. Update is being ignored. jsonString={1}
+INVALID_JSON_MISSING_KEY=CDIST2167E Invalid JSON. Missing ''{0}'' key. Action is being ignored. jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W The properties file cannot be found: {0}
START_OFFSET_PARAM_NOT_FOUND=CDIST2169E The 'startOffset' parameter must be specified when the 'startPosition' parameter value is set to "Offset".
PARTITION_SIZE_NOT_EQUAL_TO_OFFSET_SIZE=CDIST2170E The number of values specified for the 'partition' parameter' must be the same as the number of values specified for the 'startOffset' parameter.
@@ -35,14 +35,15 @@ PARAM_IGNORED_NOT_IN_CONSITENT_REGION=CDIST2176W The parameter ''{0}'' is ignore
PARAM_INCOMPATIBLE_WITH_OTHER_PARAM_VAL=CDIST2177E The ''{0}'' parameter cannot be used when the ''{1}'' parameter has the value ''{2}''.
PARAM_VAL_INCOMPATIBLE_WITH_OTHER_PARAM_VAL=CDIST2178E The ''{0}'' parameter cannot have the value ''{1}'' when the ''{2}'' parameter has the value ''{3}''.
PARAM_VAL_INCOMPATIBLE_WITH_INPUT_PORT=CDIST2179E The operator cannot have an input port when the ''{1}'' parameter has the value ''{2}''.
-GROUP_ID_REQUIRED_FOR_PARAM_VAL=CDIST2180E A group-ID must be specified when the ''{1}'' parameter has the value ''{2}''. Specify a group.id in a property file or app option or use the groupId parameter.
+PARAM_IGNORED_FOR_PERIODIC_CR=CDIST2180W The ''{0}'' parameter is ignored in a periodic consistent region.
CONSUMER_GROUP_IN_MULTIPLE_CONSISTENT_REGIONS=CDIST2181E Multiple consistent regions detected for {0} operators in consumer group ''{1}''. All consumer operators of a consumer group must participate in the same consistent region.
PARAM_X_REQUIRED_WHEN_PARAM_Y_USED=CDIST2182E The ''{0}'' parameter must be specified when the ''{1}'' parameter is used.
INVALID_PARAMETER_VALUE=CDIST2183E The ''{0}'' value is invalid for the ''{1}'' parameter. Valid values are {2}.
-CHECKPOINT_CONFIG_NOT_SUPPORTED=CDIST2184E The following operator does not support checkpoint configuration: ''{0}''
+WARN_CONTROLPORT_DEPRECATED_IN_CR=CDIST2184W The usage of the optional control input port in the ''{0}'' operator is deprecated when the operator is part of a consistent region.
JCP_REQUIRED_NOCR_STARTPOS_NOT_DEFAULT=CDIST2185E To support ''startPosition: {0};'', the application graph must contain a ''JobControlPlane'' operator. Please add a ''JobControlPlane'' operator to your application graph.
WARN_ENSURE_JCP_ADDED_STARTPOS_NOT_DEFAULT=CDIST2186W To fully support other values than 'Default' for the 'startPosition' parameter in the {0} operator, the application graph must contain a 'JobControlPlane' operator. Remember to add a 'JobControlPlane' operator to your application graph.
PARAMETERS_EXCLUDE_EACH_OTHER=CDIST2187E The ''{0}'' parameter and the ''{1}'' parameter exclude each other. Specify only one of them.
PATTERN_SUBSCRIPTION_REQUIRES_GROUP_MGT=CDIST2188E The ''{0}'' parameter can only be used when Kafka group management can be enabled. Make sure that the ''{1}'' operator is configured in a way that enables group management. Please consult the SPL documentation of the ''{2}'' operator for the preconditions.
TOPIC_ATTRIBUTE_NOT_STRING=CDIST2189E The 'topic' input attribute must have a type of "rstring" or "ustring" when the 'topicAttribute' parameter is not specified.
-PRODUCER_INVALID_OPORT_SCHEMA=CDIST2190E Invalid schema for the optional output port of the ''{0}'' operator. The port supports two optional attributes, one must be an rstring or ustring, and one a tuple type with same schema as the operator's input port.
+PRODUCER_INVALID_OPORT_SCHEMA=CDIST2190E Invalid schema for the optional output port of the ''{0}'' operator. The port supports two optional attributes, one must be an rstring, ustring, or an optional of these types, and one a tuple type with the same schema as the operator's input port.
+INVALID_JSON=CDIST2191E Invalid JSON. Action is being ignored. jsonString={0}
\ No newline at end of file
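The switch from "{0}" to ''{0}'' in several messages is deliberate: the texts are rendered with java.text.MessageFormat, where a single quote opens a quoted section and must be doubled to appear literally. A quick demonstration:

    import java.text.MessageFormat;

    public class QuoteDemo {
        public static void main (String[] args) {
            // doubled quotes survive formatting: Input attribute not found: 'key'.
            System.out.println (MessageFormat.format ("Input attribute not found: ''{0}''.", "key"));
            // a single quote suppresses substitution: Input attribute not found: {0}.
            System.out.println (MessageFormat.format ("Input attribute not found: '{0}'.", "key"));
        }
    }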
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_de_DE.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_de_DE.properties
index 1690d224..6dd74914 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_de_DE.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_de_DE.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E Der Operator '{0}' kann nicht
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E Das Eingabeattribut 'partition' muss den Typ "int32" aufweisen, wenn der Parameter 'partitionAttribute' nicht angegeben ist.
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E Nicht unterst\u00fctzter Typ "{0}" beim Festlegen von Attribut "{1}"
START_TIME_PARAM_NOT_FOUND=CDIST2164E Der Parameter 'startTime' muss angegeben werden, wenn der Wert des Parameters 'startPosition' auf "Time" gesetzt ist.
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W Die Parameter 'topic', 'partition' und 'startPositition' werden ignoriert, wenn der Eingabeport vorhanden ist.
TOPIC_OR_INPUT_PORT=CDIST2166E Der Parameter 'topic' muss angegeben werden, wenn kein Eingabeport definiert ist.
INVALID_JSON_MISSING_KEY=CDIST2167E JSON ung\u00fcltig! Schl\u00fcssel \"{0}\" fehlt. Aktualisierung wird ignoriert. jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W Die Eigenschaftendatei kann nicht gefunden werden: {0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_es_ES.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_es_ES.properties
index 72ddf3f3..22243502 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_es_ES.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_es_ES.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E El operador {0} no puede ser
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E El atributo de entrada 'partition' debe tener un tipo "int32" cuando no se especifica el par\u00e1metro 'partitionAttribute'.
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E Tipo no soportado: "{0}" al establecer el atributo "{1}"
START_TIME_PARAM_NOT_FOUND=CDIST2164E El par\u00e1metro 'startTime' se tiene que especificar cuando el valor del par\u00e1metro 'startPosition' se establece en "Time".
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W Los par\u00e1metros 'topic', 'partition' y 'startPositition' se ignoran cuando el puerto de entrada est\u00e1 presente.
TOPIC_OR_INPUT_PORT=CDIST2166E El par\u00e1metro 'topic' se tiene especificar cuando no hay ning\u00fan puerto de entrada definido.
INVALID_JSON_MISSING_KEY=CDIST2167E \u00a1JSON no v\u00e1lido! Falta la clave \"{0}\". Se ignora la actualizaci\u00f3n. jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W No se puede encontrar el archivo de propiedades: {0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_fr_FR.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_fr_FR.properties
index fb7e36c0..dfa6cf1b 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_fr_FR.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_fr_FR.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E L'op\u00e9rateur {0} ne peut
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E L'attribut d'entr\u00e9e 'partition' doit avoir le type "int32" lorsque le param\u00e8tre 'partitionAttribute' n'est pas indiqu\u00e9.
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E Type "{0}" non pris en charge lors de la d\u00e9finition de l''attribut "{1}"
START_TIME_PARAM_NOT_FOUND=CDIST2164E Le param\u00e8tre 'startTime' doit \u00eatre indiqu\u00e9 lorsque le param\u00e8tre 'startPosition' est d\u00e9fini avec la valeur "Time".
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W Les param\u00e8tres 'topic', 'partition' et 'startPositition' sont ignor\u00e9s lorsque le port d'entr\u00e9e est pr\u00e9sent.
TOPIC_OR_INPUT_PORT=CDIST2166E Le param\u00e8tre 'topic' doit \u00eatre indiqu\u00e9 lorsqu'aucun port d'entr\u00e9e n'est d\u00e9fini.
INVALID_JSON_MISSING_KEY=CDIST2167E JSON non valide. Cl\u00e9 \"{0}\" manquante. La mise \u00e0 jour est ignor\u00e9e. jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W Le fichier de propri\u00e9t\u00e9s est introuvable : {0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_it_IT.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_it_IT.properties
index 29a78a34..521d5bf0 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_it_IT.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_it_IT.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E L'operatore {0} non pu\u00f2
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E L'attributo di input 'partition' deve avere un tipo "int32" quando il parametro 'partitionAttribute' non \u00e8 stato specificato.
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E Tipo non supportato "{0}" quando si imposta l''attributo "{1}"
START_TIME_PARAM_NOT_FOUND=CDIST2164E Il parametro 'startTime' deve essere specificato quando il valore del parametro 'startPosition' \u00e8 impostato su "Time".
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W I parametri 'topic', 'partition' e 'startPositition' vengono ignorati quando \u00e8 presente la porta di input.
TOPIC_OR_INPUT_PORT=CDIST2166E Il parametro 'topic' deve essere specificato quando non \u00e8 definita nessuna porta di input.
INVALID_JSON_MISSING_KEY=CDIST2167E JSON non valido. Chiave \"{0}\" mancante. L''aggiornamento viene ignorato. jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W Impossibile trovare il file delle propriet\u00e0: {0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ja_JP.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ja_JP.properties
index 36a79644..4797a4b4 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ja_JP.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ja_JP.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E {0} \u30aa\u30da\u30ec\u30fc\
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E 'partitionAttribute' \u30d1\u30e9\u30e1\u30fc\u30bf\u30fc\u304c\u6307\u5b9a\u3055\u308c\u3066\u3044\u306a\u3044\u5834\u5408\u3001'partition' \u5165\u529b\u5c5e\u6027\u306f "int32" \u578b\u3067\u306a\u3051\u308c\u3070\u306a\u308a\u307e\u305b\u3093\u3002
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E \u5c5e\u6027 "{1}" \u3092\u8a2d\u5b9a\u4e2d\u306f\u30b5\u30dd\u30fc\u30c8\u3055\u308c\u306a\u3044\u578b: "{0}"
START_TIME_PARAM_NOT_FOUND=CDIST2164E 'startPosition' \u30d1\u30e9\u30e1\u30fc\u30bf\u30fc\u5024\u304c "Time" \u306b\u8a2d\u5b9a\u3055\u308c\u3066\u3044\u308b\u5834\u5408\u3001'startTime' \u30d1\u30e9\u30e1\u30fc\u30bf\u30fc\u3092\u6307\u5b9a\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W \u5165\u529b\u30dd\u30fc\u30c8\u304c\u5b58\u5728\u3059\u308b\u5834\u5408\u3001\u30d1\u30e9\u30e1\u30fc\u30bf\u30fc\u306e 'topic'\u3001'partition'\u3001\u304a\u3088\u3073 'startPositition' \u306f\u7121\u8996\u3055\u308c\u307e\u3059\u3002
TOPIC_OR_INPUT_PORT=CDIST2166E \u5165\u529b\u30dd\u30fc\u30c8\u304c\u5b9a\u7fa9\u3055\u308c\u3066\u3044\u306a\u3044\u5834\u5408\u3001'topic' \u30d1\u30e9\u30e1\u30fc\u30bf\u30fc\u3092\u6307\u5b9a\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002
INVALID_JSON_MISSING_KEY=CDIST2167E \u7121\u52b9\u306a JSON! \"{0}\" \u30ad\u30fc\u304c\u3042\u308a\u307e\u305b\u3093\u3002 \u66f4\u65b0\u306f\u7121\u8996\u3055\u308c\u3066\u3044\u307e\u3059\u3002 jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W \u30d7\u30ed\u30d1\u30c6\u30a3\u30fc\u30fb\u30d5\u30a1\u30a4\u30eb\u304c\u898b\u3064\u304b\u308a\u307e\u305b\u3093: {0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ko_KR.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ko_KR.properties
index eb8f5448..4ed86497 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ko_KR.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ko_KR.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E {0} \uc5f0\uc0b0\uc790\ub294
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E 'partitionAttribute' \ub9e4\uac1c\ubcc0\uc218\uac00 \uc9c0\uc815\ub418\uc9c0 \uc54a\uc740 \uacbd\uc6b0 'partition' \uc785\ub825 \uc18d\uc131\uc758 \uc720\ud615\uc774 "int32"\uc5ec\uc57c \ud569\ub2c8\ub2e4.
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E "{1}" \uc18d\uc131 \uc124\uc815 \uc2dc "{0}" \uc720\ud615\uc740 \uc9c0\uc6d0\ub418\uc9c0 \uc54a\uc2b5\ub2c8\ub2e4.
START_TIME_PARAM_NOT_FOUND=CDIST2164E 'startPosition' \ub9e4\uac1c\ubcc0\uc218 \uac12\uc774 "Time"\uc73c\ub85c \uc124\uc815\ub41c \uacbd\uc6b0 'startTime' \ub9e4\uac1c\ubcc0\uc218\ub97c \uc9c0\uc815\ud574\uc57c \ud569\ub2c8\ub2e4.
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W \uc785\ub825 \ud3ec\ud2b8\uac00 \uc874\uc7ac\ud558\ub294 \uacbd\uc6b0 'topic', 'partition' \ubc0f 'startPositition' \ub9e4\uac1c\ubcc0\uc218\uac00 \ubb34\uc2dc\ub429\ub2c8\ub2e4.
TOPIC_OR_INPUT_PORT=CDIST2166E \uc785\ub825 \ud3ec\ud2b8\uac00 \uc815\uc758\ub418\uc9c0 \uc54a\uc740 \uacbd\uc6b0 'topic' \ub9e4\uac1c\ubcc0\uc218\ub97c \uc9c0\uc815\ud574\uc57c \ud569\ub2c8\ub2e4.
INVALID_JSON_MISSING_KEY=CDIST2167E JSON\uc774 \uc720\ud6a8\ud558\uc9c0 \uc54a\uc2b5\ub2c8\ub2e4! \"{0}\" \ud0a4\uac00 \ub204\ub77d\ub418\uc5c8\uc2b5\ub2c8\ub2e4. \uac31\uc2e0\uc774 \ubb34\uc2dc\ub429\ub2c8\ub2e4. jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W \ud2b9\uc131 \ud30c\uc77c\uc744 \ucc3e\uc744 \uc218 \uc5c6\uc74c: {0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_pt_BR.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_pt_BR.properties
index 90ef8700..6a27ab7f 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_pt_BR.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_pt_BR.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E O operador {0} n\u00e3o pode
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E O atributo de entrada 'partition' deve ter um tipo de "int32" quando o par\u00e2metro 'partitionAttribute' n\u00e3o est\u00e1 especificado.
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E Tipo n\u00e3o suportado: "{0}" ao configurar o atributo "{1}"
START_TIME_PARAM_NOT_FOUND=CDIST2164E O par\u00e2metro 'startTime' deve ser especificado quando o valor do par\u00e2metro 'startPosition' est\u00e1 configurado como "Time".
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W Os par\u00e2metros 'topic', 'partition' e 'startPositition' s\u00e3o ignorados quando a porta de entrada est\u00e1 presente.
TOPIC_OR_INPUT_PORT=CDIST2166E O par\u00e2metro 'topic' deve ser especificado quando nenhuma porta de entrada est\u00e1 definida.
INVALID_JSON_MISSING_KEY=CDIST2167E JSON inv\u00e1lido! Chave \"{0}\" ausente. A atualiza\u00e7\u00e3o est\u00e1 sendo ignorada. jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W O arquivo de propriedades n\u00e3o pode ser localizado: {0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ru_RU.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ru_RU.properties
index 187a2b97..dc24a208 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ru_RU.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_ru_RU.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E \u041e\u043f\u0435\u0440\u043
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E \u0415\u0441\u043b\u0438 \u043d\u0435 \u0437\u0430\u0434\u0430\u043d \u043f\u0430\u0440\u0430\u043c\u0435\u0442\u0440 'partitionAttribute', \u0442\u0438\u043f\u043e\u043c \u0432\u0445\u043e\u0434\u043d\u043e\u0433\u043e \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u0430 'partition' \u0434\u043e\u043b\u0436\u043d\u043e \u0431\u044b\u0442\u044c "int32".
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E \u041d\u0435\u043f\u043e\u0434\u0434\u0435\u0440\u0436\u0438\u0432\u0430\u0435\u043c\u044b\u0439 \u0442\u0438\u043f "{0}" \u043f\u0440\u0438 \u043d\u0430\u0437\u043d\u0430\u0447\u0435\u043d\u0438\u0438 \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u0430 "{1}"
START_TIME_PARAM_NOT_FOUND=CDIST2164E \u0415\u0441\u043b\u0438 \u0432 \u043a\u0430\u0447\u0435\u0441\u0442\u0432\u0435 \u0437\u043d\u0430\u0447\u0435\u043d\u0438\u044f \u043f\u0430\u0440\u0430\u043c\u0435\u0442\u0440\u0430 'startPosition' \u0437\u0430\u0434\u0430\u043d\u043e "Time", \u0434\u043e\u043b\u0436\u0435\u043d \u0431\u044b\u0442\u044c \u0437\u0430\u0434\u0430\u043d \u043f\u0430\u0440\u0430\u043c\u0435\u0442\u0440 'startTime'.
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W \u041f\u0430\u0440\u0430\u043c\u0435\u0442\u0440\u044b 'topic', 'partition' \u0438 'startPositition' \u0438\u0433\u043d\u043e\u0440\u0438\u0440\u0443\u044e\u0442\u0441\u044f, \u0435\u0441\u043b\u0438 \u043f\u0440\u0438\u0441\u0443\u0442\u0441\u0442\u0432\u0443\u0435\u0442 \u0432\u0445\u043e\u0434\u043d\u043e\u0439 \u043f\u043e\u0440\u0442.
TOPIC_OR_INPUT_PORT=CDIST2166E \u0415\u0441\u043b\u0438 \u043d\u0438\u043a\u0430\u043a\u043e\u0439 \u0432\u0445\u043e\u0434\u043d\u043e\u0439 \u043f\u043e\u0440\u0442 \u043d\u0435 \u0437\u0430\u0434\u0430\u043d, \u0434\u043e\u043b\u0436\u0435\u043d \u0431\u044b\u0442\u044c \u0437\u0430\u0434\u0430\u043d \u043f\u0430\u0440\u0430\u043c\u0435\u0442\u0440 'topic'.
INVALID_JSON_MISSING_KEY=CDIST2167E \u041d\u0435\u0434\u043e\u043f\u0443\u0441\u0442\u0438\u043c\u044b\u0439 JSON! \u041e\u0442\u0441\u0443\u0442\u0441\u0442\u0432\u0443\u0435\u0442 \u043a\u043b\u044e\u0447 \"{0}\". \u041e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u0435 \u0438\u0433\u043d\u043e\u0440\u0438\u0440\u0443\u0435\u0442\u0441\u044f. jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W \u041d\u0435 \u0443\u0434\u0430\u0435\u0442\u0441\u044f \u043d\u0430\u0439\u0442\u0438 \u0444\u0430\u0439\u043b \u0441\u0432\u043e\u0439\u0441\u0442\u0432: {0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_zh_CN.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_zh_CN.properties
index 79a19a23..32f49608 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_zh_CN.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_zh_CN.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E {0} \u64cd\u4f5c\u7a0b\u5e8f\
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E \u5982\u679c\u672a\u6307\u5b9a\u201cpartitionAttribute\u201d\u53c2\u6570\uff0c\u90a3\u4e48\u201cpartition\u201d\u8f93\u5165\u5c5e\u6027\u5fc5\u987b\u4e3a\u201cint32\u201d\u7c7b\u578b\u3002
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E \u8bbe\u7f6e\u5c5e\u6027\u201c{1}\u201d\u65f6\uff0c\u7c7b\u578b\u201c{0}\u201d\u4e0d\u53d7\u652f\u6301
START_TIME_PARAM_NOT_FOUND=CDIST2164E \u5982\u679c\u201cstartPosition\u201d\u53c2\u6570\u503c\u8bbe\u7f6e\u4e3a\u201cTime\u201d\uff0c\u90a3\u4e48\u5fc5\u987b\u6307\u5b9a\u201cstartTime\u201d\u53c2\u6570\u3002
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W \u5982\u679c\u8f93\u5165\u7aef\u53e3\u5b58\u5728\uff0c\u90a3\u4e48\u201ctopic\u201d\u3001\u201cpartition\u201d\u548c\u201cstartPositition\u201d\u53c2\u6570\u88ab\u5ffd\u7565\u3002
TOPIC_OR_INPUT_PORT=CDIST2166E \u5982\u679c\u672a\u5b9a\u4e49\u8f93\u5165\u7aef\u53e3\uff0c\u90a3\u4e48\u5fc5\u987b\u6307\u5b9a\u201ctopic\u201d\u53c2\u6570\u3002
INVALID_JSON_MISSING_KEY=CDIST2167E \u65e0\u6548 JSON\uff01\u7f3a\u5c11\u201c{0}\u201d\u952e\u3002\u66f4\u65b0\u5c06\u88ab\u5ffd\u7565\u3002jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W \u627e\u4e0d\u5230\u5c5e\u6027\u6587\u4ef6\uff1a{0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_zh_TW.properties b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_zh_TW.properties
index 9f2ee7b3..7bbfad5b 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_zh_TW.properties
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/i18n/KafkaMessages_zh_TW.properties
@@ -19,7 +19,6 @@ OPERATOR_NOT_START_OF_CONSISTENT_REGION=CDIST2161E {0} \u904b\u7b97\u5b50\u4e0d\
PARTITION_ATTRIBUTE_NOT_INT32=CDIST2162E \u7576\u6c92\u6709\u6307\u5b9a 'partitionAttribute' \u53c3\u6578\u6642\uff0c'partition' \u8f38\u5165\u5c6c\u6027\u7684\u985e\u578b\u5fc5\u9808\u70ba "int32"\u3002
UNSUPPORTED_TYPE_EXCEPTION=CDIST2163E \u7576\u8a2d\u5b9a\u5c6c\u6027 "{1}" \u6642\uff0c\u4e0d\u652f\u63f4\u985e\u578b "{0}"
START_TIME_PARAM_NOT_FOUND=CDIST2164E \u7576 'startPosition' \u53c3\u6578\u503c\u8a2d\u70ba "Time" \u6642\uff0c\u5fc5\u9808\u6307\u5b9a 'startTime' \u53c3\u6578\u3002
-PARAMS_IGNORED_WITH_INPUT_PORT=CDIST2165W \u7576\u5b58\u5728\u8f38\u5165\u57e0\u6642\uff0c\u5c07\u6703\u5ffd\u7565 'topic'\u3001'partition' \u53ca 'startPositition' \u53c3\u6578\u3002
TOPIC_OR_INPUT_PORT=CDIST2166E \u7576\u6c92\u6709\u5b9a\u7fa9\u8f38\u5165\u57e0\u6642\uff0c\u5fc5\u9808\u6307\u5b9a 'topic' \u53c3\u6578\u3002
INVALID_JSON_MISSING_KEY=CDIST2167E JSON \u7121\u6548\uff01\u907a\u6f0f \"{0}\" \u7d22\u5f15\u9375\u3002\u66f4\u65b0\u6703\u88ab\u5ffd\u7565\u3002jsonString={1}
PROPERTIES_FILE_NOT_FOUND=CDIST2168W \u627e\u4e0d\u5230\u5167\u5bb9\u6a94\uff1a{0}
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaConsumerOperator.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaConsumerOperator.java
index 07bafc31..e1453103 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaConsumerOperator.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaConsumerOperator.java
@@ -13,12 +13,16 @@
*/
package com.ibm.streamsx.kafka.operators;
+import java.io.ObjectInputStream;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerConfig;
@@ -29,6 +33,7 @@
import com.ibm.streams.operator.OperatorContext;
import com.ibm.streams.operator.OperatorContext.ContextCheck;
import com.ibm.streams.operator.OutputTuple;
+import com.ibm.streams.operator.ProcessingElement;
import com.ibm.streams.operator.StreamSchema;
import com.ibm.streams.operator.StreamingData.Punctuation;
import com.ibm.streams.operator.StreamingInput;
@@ -43,26 +48,30 @@
import com.ibm.streams.operator.state.ConsistentRegionContext;
import com.ibm.streams.operator.types.RString;
import com.ibm.streams.operator.types.ValueFactory;
+import com.ibm.streamsx.kafka.ControlportJsonParseException;
import com.ibm.streamsx.kafka.Features;
import com.ibm.streamsx.kafka.KafkaClientInitializationException;
import com.ibm.streamsx.kafka.KafkaConfigurationException;
+import com.ibm.streamsx.kafka.KafkaOperatorResetFailedException;
import com.ibm.streamsx.kafka.MsgFormatter;
-import com.ibm.streamsx.kafka.TopicPartitionUpdateParseException;
import com.ibm.streamsx.kafka.clients.consumer.CommitMode;
import com.ibm.streamsx.kafka.clients.consumer.ConsumerClient;
+import com.ibm.streamsx.kafka.clients.consumer.ConsumerClientBuilder;
+import com.ibm.streamsx.kafka.clients.consumer.ControlPortAction;
+import com.ibm.streamsx.kafka.clients.consumer.ControlPortActionType;
import com.ibm.streamsx.kafka.clients.consumer.CrKafkaConsumerGroupClient;
import com.ibm.streamsx.kafka.clients.consumer.CrKafkaStaticAssignConsumerClient;
+import com.ibm.streamsx.kafka.clients.consumer.DummyConsumerClient;
import com.ibm.streamsx.kafka.clients.consumer.NonCrKafkaConsumerClient;
import com.ibm.streamsx.kafka.clients.consumer.NonCrKafkaConsumerGroupClient;
import com.ibm.streamsx.kafka.clients.consumer.StartPosition;
-import com.ibm.streamsx.kafka.clients.consumer.TopicPartitionUpdate;
import com.ibm.streamsx.kafka.i18n.Messages;
import com.ibm.streamsx.kafka.properties.KafkaOperatorProperties;
public abstract class AbstractKafkaConsumerOperator extends AbstractKafkaOperator {
private static final Logger logger = Logger.getLogger(AbstractKafkaConsumerOperator.class);
-
+
private static final long DEFAULT_CONSUMER_TIMEOUT = 100l;
private static final long SHUTDOWN_TIMEOUT = 15l;
private static final TimeUnit SHUTDOWN_TIMEOUT_TIMEUNIT = TimeUnit.SECONDS;
@@ -89,11 +98,16 @@ public abstract class AbstractKafkaConsumerOperator extends AbstractKafkaOperato
public static final String COMMIT_COUNT_PARAM = "commitCount"; //$NON-NLS-1$
public static final String COMMIT_PERIOD_PARAM = "commitPeriod"; //$NON-NLS-1$
public static final String START_OFFSET_PARAM = "startOffset"; //$NON-NLS-1$
+ public static final String GROUP_ID_PARAM = "groupId";
+ public static final String STATIC_GROUP_MEMBER_PARAM = "staticGroupMember";
private static final double DEFAULT_COMMIT_PERIOD = 5.0;
private Thread processThread;
- private ConsumerClient consumer;
+ private ConsumerClientBuilder groupEnabledClientBuilder;
+ private ConsumerClientBuilder staticAssignClientBuilder;
+ private Map magics = new HashMap<>();
+ private AtomicReference<ConsumerClient> consumerRef;
private AtomicBoolean shutdown;
/* Parameters */
@@ -113,12 +127,14 @@ public abstract class AbstractKafkaConsumerOperator extends AbstractKafkaOperato
private double commitPeriod = DEFAULT_COMMIT_PERIOD;
private CommitMode commitMode = CommitMode.Time;
private String groupId = null;
+ private boolean staticGroupMember = false;
private boolean groupIdSpecified = false;
private Long startTime = -1l;
private long consumerPollTimeout = DEFAULT_CONSUMER_TIMEOUT;
private CountDownLatch resettingLatch;
private CountDownLatch processThreadEndedLatch;
+ private Object monitor = new Object();
private boolean hasOutputTopic;
private boolean hasOutputKey;
private boolean hasOutputOffset;
@@ -127,14 +143,15 @@ public abstract class AbstractKafkaConsumerOperator extends AbstractKafkaOperato
// The number of messages in which the value was malformed and could not be deserialized
private Metric nMalformedMessages;
- private Metric isGroupManagementActive;
+ private Metric nFailedControlTuples;
long maxDrainMillis = 0l;
+
// Initialize the metrics
@CustomMetric (kind = Metric.Kind.GAUGE, name = "isGroupManagementActive", description = "Shows the Kafka group management state of the operator. "
+ "When the metric shows 1, group management is active. When the metric is 0, group management is not in place.")
public void setIsGroupManagementActive (Metric isGroupManagementActive) {
- this.isGroupManagementActive = isGroupManagementActive;
+ // No need to do anything here. The annotation injects the metric into the operator context, from where it can be retrieved.
}
@CustomMetric (kind = Metric.Kind.COUNTER, name = "nDroppedMalformedMessages", description = "Number of dropped malformed messages")
@@ -147,6 +164,11 @@ public void setnPendingMessages(Metric nPendingMessages) {
// No need to do anything here. The annotation injects the metric into the operator context, from where it can be retrieved.
}
+ @CustomMetric (kind = Metric.Kind.COUNTER, description = "Number of failed tuples received on control port", name = "nFailedControlTuples")
+ public void setnFailedControlTuples (Metric m) {
+ this.nFailedControlTuples = m;
+ }
+
@CustomMetric (kind = Metric.Kind.COUNTER, description = "Number times message fetching was paused due to low memory.")
public void setnLowMemoryPause(Metric nLowMemoryPause) {
// No need to do anything here. The annotation injects the metric into the operator context, from where it can be retrieved.
@@ -199,7 +221,7 @@ public void setOutputPartitionAttrName(String outputPartitionAttrName) {
this.outputPartitionAttrName = outputPartitionAttrName;
}
- @Parameter(optional = true, name="startOffset",
+ @Parameter(optional = true, name=START_OFFSET_PARAM,
description="This parameter indicates the start offset that the operator should begin consuming "
+ "messages from. In order for this parameter's values to take affect, the **startPosition** "
+ "parameter must be set to `Offset`. Furthermore, the specific partition(s) that the operator "
@@ -221,7 +243,7 @@ public void setStartOffsets(long[] startOffsets) {
}
}
- @Parameter(optional = true, name="startTime",
+ @Parameter(optional = true, name=START_TIME_PARAM,
description="This parameter is only used when the **startPosition** parameter is set to `Time`. "
+ "Then the operator will begin "
+ "reading records from the earliest offset whose timestamp is greater than or "
@@ -233,19 +255,36 @@ public void setStartTime(Long startTime) {
this.startTime = startTime;
}
- @Parameter(optional = true, name="groupId",
+ @Parameter (optional = true, name = STATIC_GROUP_MEMBER_PARAM,
+ description = "Enables static Kafka group membership (generates and sets a `group.instance.id` "
+ + "overriding a potentially user provided group instance identifier) "
+ + "and sets a higher default session timeout. "
+ + "when set to `true`.\\n"
+ + "\\n"
+ + "This parameter is ignored when group management is not active.\\n"
+ + "\\n"
+ + "**Please note, that the Kafka server version must be at minimum 2.3 to use static group membership.** "
+ + "With lower version, the operator will fail.\\n"
+ + "\\n"
+ + "The default value of this parameter is `false`.")
+ public void setStaticGroupMember (boolean staticGrpMember) {
+ this.staticGroupMember = staticGrpMember;
+ }
+
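What the parameter automates is ordinary consumer configuration: static membership is enabled by assigning a stable group.instance.id, which requires kafka-clients and brokers at version 2.3 or later. A hand-written equivalent might look like this (the instance id and timeout values are illustrative):

    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;

    public class StaticMembershipConfig {
        public static void main (String[] args) {
            Properties props = new Properties();
            props.put (ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            props.put (ConsumerConfig.GROUP_ID_CONFIG, "myGroup");
            // a stable, per-instance id makes this member static; the operator
            // generates such an id when 'staticGroupMember: true;' is set
            props.put (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "myGroup-member-0");
            // static members typically use a longer session timeout so that a
            // restart does not trigger a rebalance
            props.put (ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "120000");
        }
    }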
+ @Parameter(optional = true, name=GROUP_ID_PARAM,
description="Specifies the group ID that should be used "
+ "when connecting to the Kafka cluster. The value "
+ "specified by this parameter will override the `group.id` "
+ "Kafka property if specified. If this parameter is not "
+ "specified and the `group.id` Kafka property is not "
+ "specified, the operator will use a generated group ID, "
- + "and the group management feature is not active.")
+ + "and be a single group member unless the **partition** "
+ + "parameter is used.")
public void setGroupId (String groupId) {
this.groupId = groupId;
}
- @Parameter(optional = true, name="startPosition",
+ @Parameter(optional = true, name=START_POSITION_PARAM,
description="Specifies where the operator should start "
+ "reading from topics. Valid options include: `Beginning`, `End`, `Default`, `Time`, and `Offset`.\\n"
+ "* `Beginning`: The consumer starts reading from the beginning of the data in the Kafka topics. "
@@ -277,7 +316,8 @@ public void setGroupId (String groupId) {
+ "**topic** parameter, and the operator cannot participate in a consumer group.\\n"
+ "\\n"
+ "\\n"
- + "If this parameter is not specified, the start position is `Default`.\\n"
+ + "If this parameter is not specified, the start position is `Default`. "
+ + "This parameter is incompatible with the optional input port.\\n"
+ "\\n"
+ "\\n"
+ "Note, that using a startPosition other than `Default` requires the application always to have a **JobControlPlane** "
@@ -305,7 +345,8 @@ public void setStartPosition(StartPosition startPosition) {
+ "* When using this parameter, the consumer will *assign* the "
+ "consumer to the specified topics partitions, rather than *subscribe* "
+ "to the topics. This implies that the consumer will not use Kafka's "
- + "group management feature.")
+ + "group management feature.\\n"
+ + "* This parameter is incompatible with the optional input port.\\n")
public void setPartitions(int[] partitions) {
if (partitions != null) {
this.partitions = new ArrayList<>(partitions.length);
@@ -317,9 +358,12 @@ public void setPartitions(int[] partitions) {
@Parameter(optional = true, name=TOPIC_PARAM,
description="Specifies the topic or topics that the consumer should "
+ "subscribe to. To assign the consumer to specific partitions, "
- + "use the **partitions** parameter. To specify multiple topics "
- + "from which the operator should consume, separate the the "
- + "topic names by comma, for example `topic: \\\"topic1\\\", \\\"topic2\\\";`.")
+ + "use the **partitions** parameter in addition. To specify multiple topics "
+ + "from which the operator should consume, separate the "
+ + "topic names by comma, for example `topic: \\\"topic1\\\", \\\"topic2\\\";`. "
+ + "To subscribe to multiple topics that match a regular expression, use the **pattern** parameter.\\n"
+ + "\\n"
+ + "This parameter is incompatible with the optional input port.")
public void setTopics(List<String> topics) {
this.topics = topics;
}
@@ -333,14 +377,7 @@ public void setTopics(List topics) {
+ "* `pattern: \\\"myTopic.\\\\*\\\";` subscribes to `myTopic` and all topics that begin with `myTopic`\\n"
+ "* `pattern: \\\".\\\\*Topic\\\";` subscribes to `Topic` and all topics that end at `Topic`\\n"
+ "\\n"
- + "This parameter is incompatible with the **topic** and the **partition** parameter. "
- + "Dynamic subscription with a pattern implies group management. The parameter is therefore "
- + "incompatible with all *operator configurations that disable group management*:\\n"
- + "\\n"
- + "* no group identifier configured (neither via **groupId** parameter nor as `group.id` consumer configuration)\\n"
- + "* presence of an input control port\\n"
- + "* usage of the **partition** parameter\\n"
- + "* usage of **startPosition** parameter with a different value than `Default` (only when not in consistent region)\\n"
+ + "This parameter is incompatible with the **topic** and the **partition** parameters, and with the optional input port.\\n"
+ "\\n"
+ "The regular expression syntax follows the Perl 5 regular expressions with some differences. "
+ "For details see [https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html|Regular Expressions in Java 8].")
@@ -412,6 +449,14 @@ public void setCommitCount (int commitCount) {
this.commitCount = commitCount;
}
+ @ContextCheck(compile = true)
+ public static void warnInputPortDeprecatedWithConsistentRegion (OperatorContextChecker checker) {
+ final OperatorContext operatorContext = checker.getOperatorContext();
+ if (operatorContext.getOptionalContext (ConsistentRegionContext.class) != null && operatorContext.getNumberOfStreamingInputs() > 0) {
+ System.err.println (Messages.getString ("WARN_CONTROLPORT_DEPRECATED_IN_CR", operatorContext.getKind()));
+ }
+ }
+
@ContextCheck(compile = true)
public static void checkStartOffsetRequiresPartition(OperatorContextChecker checker) {
// parameters startOffset and partition must have the same size - can be checked only at runtime.
@@ -435,9 +480,18 @@ public static void checkTriggerCommitCount(OperatorContextChecker checker) {
if (parameterNames.contains(COMMIT_PERIOD_PARAM)) {
System.err.println (Messages.getString ("PARAM_IGNORED_IN_CONSITENT_REGION", COMMIT_PERIOD_PARAM));
}
- if (crContext.isStartOfRegion() && crContext.isTriggerOperator()) {
- if (!parameterNames.contains(TRIGGER_COUNT_PARAM)) {
- checker.setInvalidContext(Messages.getString("TRIGGER_PARAM_MISSING"), new Object[0]); //$NON-NLS-1$
+ if (crContext.isStartOfRegion()) {
+ if (crContext.isTriggerOperator()) {
+ // 'triggerCount' parameter required
+ if (!parameterNames.contains (TRIGGER_COUNT_PARAM)) {
+ checker.setInvalidContext(Messages.getString("TRIGGER_PARAM_MISSING"), new Object[0]); //$NON-NLS-1$
+ }
+ }
+ else {
+ // periodic CR; 'triggerCount' ignored
+ if (parameterNames.contains (TRIGGER_COUNT_PARAM)) {
+ System.err.println (Messages.getString ("PARAM_IGNORED_FOR_PERIODIC_CR", TRIGGER_COUNT_PARAM));
+ }
}
}
}
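For illustration, a minimal SPL sketch of the two consistent-region variants that this check distinguishes; composite context, stream names, topic, period, and the triggerCount value are assumptions, not part of the patch:

    // operator-driven consistent region: the consumer triggers the region,
    // so the 'triggerCount' parameter is required
    () as JCP = JobControlPlane() {}

    @consistent (trigger = operatorDriven)
    stream<rstring message> Messages = KafkaConsumer() {
        param
            propertiesFile: "etc/consumer.properties";
            topic: "myTopic";
            triggerCount: 5000;   // drain/checkpoint every 5000 submitted tuples
    }

    // periodic consistent region: 'triggerCount' is ignored and now emits a warning
    @consistent (trigger = periodic, period = 60.0)
    stream<rstring message> PeriodicMessages = KafkaConsumer() {
        param
            propertiesFile: "etc/consumer.properties";
            topic: "myTopic";
    }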
@@ -454,9 +508,6 @@ public static void checkTriggerCommitCount(OperatorContextChecker checker) {
// @ContextCheck (compile = true)
public static void warnStartPositionParamRequiresJCP (OperatorContextChecker checker) {
- if (!(Features.ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION || Features.ENABLE_NOCR_NO_CONSUMER_SEEK_AFTER_RESTART)) {
- return;
- }
OperatorContext opCtx = checker.getOperatorContext();
Set<String> paramNames = opCtx.getParameterNames();
List<StreamingInput<Tuple>> inputPorts = opCtx.getStreamingInputs();
@@ -470,7 +521,7 @@ public static void warnStartPositionParamRequiresJCP (OperatorContextChecker che
@ContextCheck (compile = true)
public static void checkPatternParamCompatibility (OperatorContextChecker checker) {
- // when input port is configured, topic, pattern, partition, and startPosition are ignored
+ // when an input port is configured, topic, pattern, partition, and startPosition cause a compile error
// Do the checks only when no input port is configured
OperatorContext opCtx = checker.getOperatorContext();
if (opCtx.getNumberOfStreamingInputs() == 0) {
@@ -492,7 +543,7 @@ public static void checkInputPort(OperatorContextChecker checker) {
Set<String> paramNames = checker.getOperatorContext().getParameterNames();
if(inputPorts.size() > 0) {
/*
- * optional input port is present, thus need to ignore the following parameters:
* optional input port is present; the following parameters cause a compiler error:
* * topic
* * pattern
* * partition
@@ -502,7 +553,7 @@ public static void checkInputPort(OperatorContextChecker checker) {
|| paramNames.contains(PATTERN_PARAM)
|| paramNames.contains(PARTITION_PARAM)
|| paramNames.contains(START_POSITION_PARAM)) {
- System.err.println(Messages.getString("PARAMS_IGNORED_WITH_INPUT_PORT")); //$NON-NLS-1$
+ checker.setInvalidContext (Messages.getString("PARAMS_INCOMPATIBLE_WITH_INPUT_PORT"), new Object[0]); //$NON-NLS-1$
}
StreamingInput<Tuple> inputPort = inputPorts.get(0);
@@ -623,10 +674,10 @@ private static void checkUserSpecifiedAttributeNameExists(OperatorContextChecker
Attribute attr = null;
if (paramNames.contains(paramNameToCheck)) {
- String topicAttrName = checker.getOperatorContext().getParameterValues(paramNameToCheck).get(0);
- attr = streamSchema.getAttribute(topicAttrName);
+ String attrName = checker.getOperatorContext().getParameterValues(paramNameToCheck).get(0);
+ attr = streamSchema.getAttribute(attrName);
if(attr == null) {
- checker.setInvalidContext(Messages.getString("OUTPUT_ATTRIBUTE_NOT_FOUND", attr), //$NON-NLS-1$
+ checker.setInvalidContext(Messages.getString("OUTPUT_ATTRIBUTE_NOT_FOUND", attrName), //$NON-NLS-1$
new Object[0]);
}
}
@@ -650,208 +701,228 @@ private static void checkTriggerCountValue (OperatorContextChecker checker) {
@Override
- public synchronized void initialize(OperatorContext context) throws Exception {
- // Must call super.initialize(context) to correctly setup an operator.
- super.initialize (context);
- logger.info ("Operator " + context.getName() + " initializing in PE: " + context.getPE().getPEId() + " in Job: " //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
- + context.getPE().getJobId());
- shutdown = new AtomicBoolean(false);
-
- StreamSchema outputSchema = context.getStreamingOutputs().get(0).getStreamSchema();
- hasOutputKey = outputSchema.getAttribute(outputKeyAttrName) != null;
- hasOutputTopic = outputSchema.getAttribute(outputTopicAttrName) != null;
- hasOutputTimetamp = outputSchema.getAttribute(outputMessageTimestampAttrName) != null;
- hasOutputPartition = outputSchema.getAttribute(outputPartitionAttrName) != null;
- hasOutputOffset = outputSchema.getAttribute(outputOffsetAttrName) != null;
-
- Class> keyClass = hasOutputKey ? getAttributeType(context.getStreamingOutputs().get(0), outputKeyAttrName)
- : String.class; // default to String.class for key type
- Class> valueClass = getAttributeType(context.getStreamingOutputs().get(0), outputMessageAttrName);
- KafkaOperatorProperties kafkaProperties = getKafkaProperties();
-
- // set the group ID property if the groupId parameter is specified
- if (groupId != null && !groupId.isEmpty()) {
- kafkaProperties.setProperty (ConsumerConfig.GROUP_ID_CONFIG, groupId);
- }
- final boolean hasInputPorts = context.getStreamingInputs().size() > 0;
- final String gid = kafkaProperties.getProperty(ConsumerConfig.GROUP_ID_CONFIG);
- this.groupIdSpecified = gid != null && !gid.isEmpty();
- logger.log (DEBUG_LEVEL, "group-ID specified: " + this.groupIdSpecified);
- crContext = context.getOptionalContext (ConsistentRegionContext.class);
- boolean groupManagementEnabled;
-
- if (crContext == null && !Features.ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION)
- groupManagementEnabled = this.groupIdSpecified && !hasInputPorts && (this.partitions == null || this.partitions.isEmpty()) && startPosition == StartPosition.Default;
- else
- groupManagementEnabled = this.groupIdSpecified && !hasInputPorts && (this.partitions == null || this.partitions.isEmpty());
- if (this.groupIdSpecified && !groupManagementEnabled) {
- if (hasInputPorts) {
- logger.warn (MsgFormatter.format ("The group.id ''{0}'' is specified. The ''{1}'' operator "
- + "will NOT participate in a consumer group because the operator is configured with an input port.",
- gid, context.getName()));
- }
- if (this.partitions != null && !this.partitions.isEmpty()) {
- logger.warn (MsgFormatter.format ("The group.id ''{0}'' is specified. The ''{1}'' operator "
- + "will NOT participate in a consumer group because partitions to consume are specified.",
- gid, context.getName()));
+ public void initialize(OperatorContext context) throws Exception {
+ synchronized (monitor) {
+ // Must call super.initialize(context) to correctly setup an operator.
+ super.initialize (context);
+ logger.info ("Operator " + context.getName() + " initializing in PE: " + context.getPE().getPEId() + " in Job: " //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+ + context.getPE().getJobId());
+ shutdown = new AtomicBoolean(false);
+
+ StreamSchema outputSchema = context.getStreamingOutputs().get(0).getStreamSchema();
+ hasOutputKey = outputSchema.getAttribute(outputKeyAttrName) != null;
+ hasOutputTopic = outputSchema.getAttribute(outputTopicAttrName) != null;
+ hasOutputTimetamp = outputSchema.getAttribute(outputMessageTimestampAttrName) != null;
+ hasOutputPartition = outputSchema.getAttribute(outputPartitionAttrName) != null;
+ hasOutputOffset = outputSchema.getAttribute(outputOffsetAttrName) != null;
+
+ Class<?> keyClass = hasOutputKey ? getAttributeType(context.getStreamingOutputs().get(0), outputKeyAttrName)
+ : String.class; // default to String.class for key type
+ Class<?> valueClass = getAttributeType(context.getStreamingOutputs().get(0), outputMessageAttrName);
+ KafkaOperatorProperties kafkaProperties = getKafkaProperties();
+
+ // set the group ID property if the groupId parameter is specified
+ if (groupId != null && !groupId.isEmpty()) {
+ kafkaProperties.setProperty (ConsumerConfig.GROUP_ID_CONFIG, groupId);
}
- if (startPosition != StartPosition.Default && !Features.ENABLE_NOCR_CONSUMER_GRP_WITH_STARTPOSITION && crContext == null) {
- logger.warn (MsgFormatter.format ("The group.id ''{0}'' is specified. The ''{1}'' operator "
- + "will NOT participate in a consumer group because startPosition != Default is configured.",
- gid, context.getName()));
- }
- }
- // when group management is disabled and no input port is configured, we must not subscribe with pattern
- // When we are here it is already guaranteed that we have one of the 'topic' or 'pattern' parameter
- final boolean p = this.pattern != null;
- final boolean t = this.topics != null;
- assert ((p && !t) || (t && !p));
- if (!groupManagementEnabled && !hasInputPorts && p) {
- final String msg = Messages.getString ("PATTERN_SUBSCRIPTION_REQUIRES_GROUP_MGT", PATTERN_PARAM, context.getName(), context.getKind());
- logger.error (msg);
- throw new KafkaConfigurationException (msg);
- }
- if (crContext != null) {
- commitMode = CommitMode.ConsistentRegionDrain;
- }
- else {
- final Set parameterNames = context.getParameterNames();
- commitMode = parameterNames.contains (COMMIT_COUNT_PARAM)? CommitMode.TupleCount: CommitMode.Time;
- }
- this.isGroupManagementActive.setValue (groupManagementEnabled? 1: 0);
- if (crContext == null) {
- if (groupManagementEnabled) {
- NonCrKafkaConsumerGroupClient.Builder builder = new NonCrKafkaConsumerGroupClient.Builder();
- builder.setOperatorContext(context)
- .setKafkaProperties(kafkaProperties)
- .setKeyClass(keyClass)
- .setValueClass(valueClass)
- .setSingleTopic (this.topics != null && this.topics.size() == 1)
- .setPollTimeout(this.consumerPollTimeout)
- .setInitialStartPosition (this.startPosition)
- .setCommitMode (commitMode)
- .setCommitPeriod (commitPeriod)
- .setCommitCount(commitCount);
- consumer = builder.build();
+ final boolean hasInputPorts = context.getStreamingInputs().size() > 0;
+ final String gid = kafkaProperties.getProperty(ConsumerConfig.GROUP_ID_CONFIG);
+ this.groupIdSpecified = gid != null && !gid.trim().isEmpty();
+ logger.log (DEBUG_LEVEL, "group-ID specified: " + this.groupIdSpecified);
+
+ if (crContext != null) {
+ commitMode = CommitMode.ConsistentRegionDrain;
}
else {
- NonCrKafkaConsumerClient.Builder builder = new NonCrKafkaConsumerClient.Builder();
- builder.setOperatorContext(context)
- .setKafkaProperties(kafkaProperties)
- .setKeyClass(keyClass)
- .setValueClass(valueClass)
- .setPollTimeout(this.consumerPollTimeout)
- .setInitialStartPosition (this.startPosition)
- .setCommitMode (commitMode)
- .setCommitPeriod (commitPeriod)
- .setCommitCount(commitCount);
- consumer = builder.build();
+ final Set<String> parameterNames = context.getParameterNames();
+ commitMode = parameterNames.contains (COMMIT_COUNT_PARAM)? CommitMode.TupleCount: CommitMode.Time;
}
- }
- else {
- if (groupManagementEnabled) {
- CrKafkaConsumerGroupClient.Builder builder = new CrKafkaConsumerGroupClient.Builder();
- builder.setOperatorContext(context)
- .setKafkaProperties(kafkaProperties)
- .setKeyClass (keyClass)
- .setValueClass (valueClass)
- .setPollTimeout (this.consumerPollTimeout)
- .setSingleTopic (this.topics != null && this.topics.size() == 1)
- .setTriggerCount (this.triggerCount)
- .setInitialStartPosition (this.startPosition)
- .setInitialStartTimestamp (this.startTime);
- consumer = builder.build();
+
+ if (this.staticGroupMember) {
+ // calculate a unique group.instance.id that is consistent across operator restarts
+ final ProcessingElement pe = context.getPE();
+ final int iidH = pe.getInstanceId().hashCode();
+ final int opnH = context.getName().hashCode();
+ final String groupInstanceId = MsgFormatter.format ("i{0}-o{1}",
+ (iidH < 0? "N" + (-iidH): "P" + iidH), (opnH < 0? "N" + (-opnH): "P" + opnH));
+ logger.info ("Generated group.instance.id: " + groupInstanceId);
+ kafkaProperties.put (ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId);
+ }
+ // create the builders for the consumer clients
+ if (crContext == null) {
+ this.groupEnabledClientBuilder = new NonCrKafkaConsumerGroupClient.Builder()
+ .setOperatorContext(context)
+ .setKafkaProperties(kafkaProperties)
+ .setKeyClass(keyClass)
+ .setValueClass(valueClass)
+ .setSingleTopic (this.topics != null && this.topics.size() == 1)
+ .setPollTimeout(this.consumerPollTimeout)
+ .setInitialStartPosition (this.startPosition)
+ .setCommitMode (commitMode)
+ .setCommitPeriod (commitPeriod)
+ .setCommitCount(commitCount);
+
+ this.staticAssignClientBuilder = new NonCrKafkaConsumerClient.Builder()
+ .setOperatorContext(context)
+ .setKafkaProperties(kafkaProperties)
+ .setKeyClass(keyClass)
+ .setValueClass(valueClass)
+ .setPollTimeout(this.consumerPollTimeout)
+ .setInitialStartPosition (this.startPosition)
+ .setCommitMode (commitMode)
+ .setCommitPeriod (commitPeriod)
+ .setCommitCount(commitCount);
}
else {
- CrKafkaStaticAssignConsumerClient.Builder builder = new CrKafkaStaticAssignConsumerClient.Builder();
- builder.setOperatorContext(context)
- .setKafkaProperties(kafkaProperties)
- .setKeyClass(keyClass)
- .setValueClass(valueClass)
- .setPollTimeout(this.consumerPollTimeout)
- .setTriggerCount(this.triggerCount);
- consumer = builder.build();
+ // CR
+ this.groupEnabledClientBuilder = new CrKafkaConsumerGroupClient.Builder()
+ .setOperatorContext(context)
+ .setKafkaProperties(kafkaProperties)
+ .setKeyClass (keyClass)
+ .setValueClass (valueClass)
+ .setPollTimeout (this.consumerPollTimeout)
+ .setSingleTopic (this.topics != null && this.topics.size() == 1)
+ .setTriggerCount (this.triggerCount)
+ .setInitialStartPosition (this.startPosition)
+ .setInitialStartTimestamp (this.startTime);
+
+ this.staticAssignClientBuilder = new CrKafkaStaticAssignConsumerClient.Builder()
+ .setOperatorContext(context)
+ .setKafkaProperties(kafkaProperties)
+ .setKeyClass(keyClass)
+ .setValueClass(valueClass)
+ .setPollTimeout(this.consumerPollTimeout)
+ .setTriggerCount(this.triggerCount);
}
- }
- logger.info (MsgFormatter.format ("consumer client {0} created", consumer.getClass().getName()));
- try {
- consumer.startConsumer();
- }
- catch (KafkaClientInitializationException e) {
-
- e.printStackTrace();
- logger.error(e.getLocalizedMessage(), e);
- logger.error("root cause: " + e.getRootCause());
- throw e;
- }
-
- // input port not used, so topic or pattern must be defined
- if (!hasInputPorts) {
- if (t) { // topics != null
- final boolean registerAsInput = true;
- registerForDataGovernance(context, topics, registerAsInput);
- switch (startPosition) {
- case Time:
- consumer.subscribeToTopicsWithTimestamp (topics, partitions, startTime);
- break;
- case Offset:
- consumer.subscribeToTopicsWithOffsets (topics.get(0), partitions, startOffsets);
- break;
- default:
- consumer.subscribeToTopics (topics, partitions, startPosition);
+ magics.put (this.staticAssignClientBuilder.getImplementationMagic(), this.staticAssignClientBuilder);
+ magics.put (this.groupEnabledClientBuilder.getImplementationMagic(), this.groupEnabledClientBuilder);
+ final ConsumerClientBuilder builder;
+ if (hasInputPorts) {
+ if (crContext != null) {
+ // in CR, we do not use group management with an input port:
+ builder = this.staticAssignClientBuilder;
+ }
+ else {
+ // not in CR: select the right builder in checkpoint reset or on first partition/topic addition
+ builder = new DummyConsumerClient.Builder()
+ .setOperatorContext (context)
+ .setKafkaProperties (kafkaProperties);
+ magics.put (builder.getImplementationMagic(), builder);
}
}
else {
- switch (startPosition) {
- case Time:
- consumer.subscribeToTopicsWithTimestamp (pattern, startTime);
- break;
- case Beginning:
- case End:
- case Default:
- consumer.subscribeToTopics (pattern, startPosition);
- break;
- default:
- throw new KafkaClientInitializationException ("Illegal 'startPosition' value for subscription with pattern: " + startPosition);
+ boolean groupManagementEnabled;
+ if (Features.ENABLE_GROUP_MANAGEMENT_NO_USER_GROUP_ID) {
+ groupManagementEnabled = this.partitions == null || this.partitions.isEmpty();
+ } else {
+ // legacy (2.x) behavior
+ groupManagementEnabled = this.groupIdSpecified && (this.partitions == null || this.partitions.isEmpty());
+ }
+ if (this.groupIdSpecified && !groupManagementEnabled) {
+ if (this.partitions != null && !this.partitions.isEmpty()) {
+ logger.warn (MsgFormatter.format ("The group.id ''{0}'' is specified. The ''{1}'' operator "
+ + "will NOT participate in a consumer group because partitions to consume are specified.",
+ gid, context.getName()));
+ }
}
+
+ // when group management is disabled and no input port is configured, we must not subscribe with pattern
+ // When we are here it is already guaranteed that we have one of the 'topic' or 'pattern' parameter
+ final boolean p = this.pattern != null;
+ final boolean t = this.topics != null;
+ assert ((p && !t) || (t && !p));
+ if (!groupManagementEnabled && p) {
+ final String msg = Messages.getString ("PATTERN_SUBSCRIPTION_REQUIRES_GROUP_MGT", PATTERN_PARAM, context.getName(), context.getKind());
+ logger.error (msg);
+ throw new KafkaConfigurationException (msg);
+ }
+ builder = groupManagementEnabled? this.groupEnabledClientBuilder: this.staticAssignClientBuilder;
}
- }
- if (crContext != null && context.getPE().getRelaunchCount() > 0) {
- resettingLatch = new CountDownLatch(1);
- }
+ ConsumerClient client = builder.build();
+ consumerRef = new AtomicReference<>(client);
+ logger.info (MsgFormatter.format ("consumer client {0} created", client.getClass().getName()));
+ try {
+ client.startConsumer();
+ }
+ catch (KafkaClientInitializationException e) {
- processThread = getOperatorContext().getThreadFactory().newThread(new Runnable() {
+ e.printStackTrace();
+ logger.error(e.getLocalizedMessage(), e);
+ logger.error("root cause: " + e.getRootCause());
+ throw e;
+ }
- @Override
- public void run() {
- try {
- processThreadEndedLatch = new CountDownLatch (1);
- // initiates start polling if assigned or subscribed by sending an event
- produceTuples();
- } catch (Exception e) {
- Logger.getLogger (this.getClass()).error("Operator error", e); //$NON-NLS-1$
- // Propagate all exceptions to the runtime to make the PE fail and possibly restart.
- // Otherwise this thread terminates leaving the PE in a healthy state without being healthy.
- throw new RuntimeException (e.getLocalizedMessage(), e);
- } finally {
- if (processThreadEndedLatch != null) processThreadEndedLatch.countDown();
- logger.info ("process thread (tid = " + Thread.currentThread().getId() + ") ended.");
+ // input port not used, so topic or pattern must be defined
+ if (!hasInputPorts) {
+ if (this.topics != null) {
+ final boolean registerAsInput = true;
+ registerForDataGovernance(context, topics, registerAsInput);
+ switch (startPosition) {
+ case Time:
+ client.subscribeToTopicsWithTimestamp (topics, partitions, startTime);
+ break;
+ case Offset:
+ client.subscribeToTopicsWithOffsets (topics.get(0), partitions, startOffsets);
+ break;
+ default:
+ client.subscribeToTopics (topics, partitions, startPosition);
+ }
+ }
+ else {
+ switch (startPosition) {
+ case Time:
+ client.subscribeToTopicsWithTimestamp (pattern, startTime);
+ break;
+ case Beginning:
+ case End:
+ case Default:
+ client.subscribeToTopics (pattern, startPosition);
+ break;
+ default:
+ throw new KafkaClientInitializationException ("Illegal 'startPosition' value for subscription with pattern: " + startPosition);
+ }
}
}
- });
- processThread.setDaemon(false);
+ if (crContext != null && context.getPE().getRelaunchCount() > 0) {
+ resettingLatch = new CountDownLatch(1);
+ }
+
+ processThread = getOperatorContext().getThreadFactory().newThread(new Runnable() {
+
+ @Override
+ public void run() {
+ try {
+ processThreadEndedLatch = new CountDownLatch (1);
+ // initiates start polling if assigned or subscribed by sending an event
+ produceTuples();
+ } catch (Exception e) {
+ Logger.getLogger (this.getClass()).error("Operator error", e); //$NON-NLS-1$
+ // Propagate all exceptions to the runtime to make the PE fail and possibly restart.
+ // Otherwise this thread terminates leaving the PE in a healthy state without being healthy.
+ throw new RuntimeException (e.getLocalizedMessage(), e);
+ } finally {
+ if (processThreadEndedLatch != null) processThreadEndedLatch.countDown();
+ logger.info ("process thread (tid = " + Thread.currentThread().getId() + ") ended.");
+ }
+ }
+ });
+
+ processThread.setDaemon(false);
+ }
}
@Override
- public synchronized void allPortsReady() throws Exception {
- OperatorContext context = getOperatorContext();
- logger.info ("Operator " + context.getName() + " all ports are ready in PE: " + context.getPE().getPEId() //$NON-NLS-1$ //$NON-NLS-2$
- + " in Job: " + context.getPE().getJobId()); //$NON-NLS-1$
- // start the thread that produces the tuples out of the message queue. The thread runs the produceTuples() method.
- if (processThread != null) {
- processThread.start();
+ public void allPortsReady() throws Exception {
+ synchronized (monitor) {
+ OperatorContext context = getOperatorContext();
+ logger.info ("Operator " + context.getName() + " all ports are ready in PE: " + context.getPE().getPEId() //$NON-NLS-1$ //$NON-NLS-2$
+ + " in Job: " + context.getPE().getJobId()); //$NON-NLS-1$
+ // start the thread that produces the tuples out of the message queue. The thread runs the produceTuples() method.
+ if (processThread != null) {
+ processThread.start();
+ }
}
}
@@ -866,9 +937,9 @@ private void produceTuples() throws Exception {
return;
}
}
-
- if(consumer.isSubscribedOrAssigned()) {
- consumer.sendStartPollingEvent();
+ final ConsumerClient consumerInitial = consumerRef.get();
+ if(consumerInitial.isSubscribedOrAssigned()) {
+ consumerInitial.sendStartPollingEvent();
}
/*
* Shutdown implementation:
@@ -890,6 +961,7 @@ private void produceTuples() throws Exception {
}
}
try {
+ ConsumerClient consumer = consumerRef.get();
// Any exceptions except InterruptedException thrown here are propagated to the caller
// Make timeout for 'getNextRecord' not too high as it influences the granularity of time based offset commit
ConsumerRecord<?, ?> record = consumer.getNextRecord (100, TimeUnit.MILLISECONDS);
@@ -910,7 +982,7 @@ private void produceTuples() throws Exception {
}
}
try {
- consumer.sendStopPollingEvent();
+ consumerRef.get().sendStopPollingEvent();
}
catch (InterruptedException ie) {
// interrupted during shutdown
@@ -999,38 +1071,111 @@ else if (attrValue instanceof byte[])
@Override
public void processPunctuation (StreamingInput<Tuple> stream, Punctuation mark) throws Exception {
if (mark == Punctuation.FINAL_MARKER) {
- logger.fatal ("Final Marker received at input port. Tuple submission is stopped. Stop fetching records.");
- // make the processThread - the thread that submits tuples and initiates offset commit - terminate
- shutdown.set (true);
- if (processThreadEndedLatch != null) {
- processThreadEndedLatch.await (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
- processThreadEndedLatch = null;
+ synchronized (monitor) {
+ logger.fatal ("Final Marker received at input port. Tuple submission is stopped. Stop fetching records.");
+ // make the processThread - the thread that submits tuples and initiates offset commit - terminate
+ shutdown.set (true);
+ if (processThreadEndedLatch != null) {
+ processThreadEndedLatch.await (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
+ processThreadEndedLatch = null;
+ }
+ final ConsumerClient consumer = consumerRef.get();
+ consumer.sendStopPollingEvent();
+ consumer.onShutdown (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
}
- consumer.sendStopPollingEvent();
- consumer.onShutdown (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
}
}
@Override
public void process (StreamingInput<Tuple> stream, Tuple tuple) throws Exception {
-
- boolean interrupted = false;
- try {
- TopicPartitionUpdate updt = TopicPartitionUpdate.fromJSON (tuple.getString(0));
- consumer.onTopicAssignmentUpdate (updt);
- } catch (TopicPartitionUpdateParseException e) {
- logger.error("Could not process control tuple. Parsing JSON '" + e.getJson() + "' failed.");
- logger.error (e.getMessage(), e);
- } catch (InterruptedException e) {
- // interrupted during shutdown
- interrupted = true;
- } catch (Exception e) {
- logger.error("Could not process control tuple: '" + tuple + "'");
- logger.error(e.getMessage(), e);
- } finally {
- if (!interrupted && consumer.isSubscribedOrAssigned()) {
- logger.info ("sendStartPollingEvent ...");
- consumer.sendStartPollingEvent();
+ synchronized (monitor) {
+ logger.info ("process >>> ENTRY");
+ boolean interrupted = false;
+ try {
+ final ConsumerClient consumer = consumerRef.get();
+ logger.info ("current consumer implementation: " + consumer);
+ ControlPortAction actn = ControlPortAction.fromJSON (tuple.getString(0));
+ final ControlPortActionType action = actn.getActionType();
+ if (consumer.supports (actn)) {
+ logger.info ("consumer implementation supports " + action);
+ consumer.onControlPortAction (actn);
+ }
+ else {
+ if ((consumer instanceof DummyConsumerClient) && (action == ControlPortActionType.ADD_ASSIGNMENT || action == ControlPortActionType.ADD_SUBSCRIPTION)) {
+ logger.info ("replacing ConsumerClient by a version that supports " + action);
+ // we can change the client implementation
+ if (consumer.isProcessing()) {
+ consumer.onShutdown (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
+ }
+ final ConsumerClientBuilder builder;
+ if (action == ControlPortActionType.ADD_SUBSCRIPTION) {
+ if (crContext != null) {
+ logger.error ("topic subscription via control port is not supported when the operator is used in a consistent region. Ignoring " + actn.getJson());
+ nFailedControlTuples.increment();
+ logger.info ("process <<< EXIT");
+ return;
+ }
+ builder = this.groupEnabledClientBuilder;
+ }
+ else {
+ if (this.groupIdSpecified) {
+ logger.warn (MsgFormatter.format ("A group.id is specified. The ''{0}'' operator "
+ + "will NOT participate in a consumer group because the operator assigns partitions.",
+ getOperatorContext().getName()));
+ }
+ builder = this.staticAssignClientBuilder;
+ }
+
+ logger.info("Using client builder: " + builder);
+ final ConsumerClient newClient = builder.build();
+ logger.info (MsgFormatter.format ("consumer client {0} created", newClient.getClass().getName()));
+ try {
+ newClient.startConsumer();
+ if (consumerRef.compareAndSet (consumer, newClient)) {
+ logger.info (MsgFormatter.format ("consumer client implementation {0} replaced by {1}",
+ consumer.getClass().getName(),
+ newClient.getClass().getName()));
+ newClient.onControlPortAction (actn);
+ }
+ else {
+ logger.warn ("consumer client replacement failed");
+ newClient.onShutdown (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
+ nFailedControlTuples.increment();
+ }
+ }
+ catch (KafkaClientInitializationException e) {
+ logger.error(e.getLocalizedMessage(), e);
+ logger.error("root cause: " + e.getRootCause());
+ nFailedControlTuples.increment();
+ throw e;
+ }
+ }
+ else {
+ // unsupported action
+ logger.error ("Could not process control tuple. Action " + action + " is not supported by the '" + consumer.getClass().getName() + "' ConsumerClient implementation. Tuple: '" + tuple + "'");
+ nFailedControlTuples.increment();
+ }
+ }
+ } catch (ControlportJsonParseException e) {
+ logger.error("Could not process control tuple. Parsing JSON '" + e.getJson() + "' failed.");
+ logger.error (e.getLocalizedMessage(), e);
+ nFailedControlTuples.increment();
+ } catch (InterruptedException e) {
+ // interrupted during shutdown
+ interrupted = true;
+ nFailedControlTuples.increment();
+ } catch (Exception e) {
+ e.printStackTrace();
+ logger.error("Could not process control tuple: '" + tuple + "':" + e);
+ logger.error (e.getLocalizedMessage(), e);
+ nFailedControlTuples.increment();
+ } finally {
+ final ConsumerClient consumer = consumerRef.get();
+ if (!interrupted && consumer.isSubscribedOrAssigned()) {
+ logger.info ("sendStartPollingEvent ...");
+ consumer.sendStartPollingEvent();
+ }
+ logger.info ("process <<< EXIT");
}
}
}
@@ -1042,27 +1187,31 @@ public void process (StreamingInput stream, Tuple tuple) throws Exception
* @throws Exception
* Operator failure, will cause the enclosing PE to terminate.
*/
- public synchronized void shutdown() throws Exception {
- final OperatorContext context = getOperatorContext();
- logger.info ("Operator " + context.getName() + " shutting down in PE: " + context.getPE().getPEId() //$NON-NLS-1$ //$NON-NLS-2$
- + " in Job: " + context.getPE().getJobId()); //$NON-NLS-1$
- shutdown.set (true);
- if (processThreadEndedLatch != null) {
- processThreadEndedLatch.await (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
- processThreadEndedLatch = null;
- }
- if (consumer.isProcessing()) {
- consumer.onShutdown (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
+ public void shutdown() throws Exception {
+ synchronized (monitor) {
+ final OperatorContext context = getOperatorContext();
+ logger.info ("Operator " + context.getName() + " shutting down in PE: " + context.getPE().getPEId() //$NON-NLS-1$ //$NON-NLS-2$
+ + " in Job: " + context.getPE().getJobId()); //$NON-NLS-1$
+ shutdown.set (true);
+ if (processThreadEndedLatch != null) {
+ processThreadEndedLatch.await (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
+ processThreadEndedLatch = null;
+ }
+ final ConsumerClient consumer = consumerRef.get();
+ if (consumer.isProcessing()) {
+ consumer.onShutdown (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
+ }
+ logger.info ("Operator " + context.getName() + ": shutdown done");
+ // Must call super.shutdown()
+ super.shutdown();
}
- logger.info ("Operator " + context.getName() + ": shutdown done");
- // Must call super.shutdown()
- super.shutdown();
}
@Override
public void drain() throws Exception {
logger.log (DEBUG_LEVEL, ">>> DRAIN"); //$NON-NLS-1$
long before = System.currentTimeMillis();
+ final ConsumerClient consumer = consumerRef.get();
if (consumer.isProcessing()) {
consumer.onDrain();
}
@@ -1088,6 +1237,7 @@ public void drain() throws Exception {
@Override
public void retireCheckpoint (long id) throws Exception {
logger.debug(">>> RETIRE CHECKPOINT (ckpt id=" + id + ")");
+ final ConsumerClient consumer = consumerRef.get();
if (consumer.isProcessing()) {
consumer.onCheckpointRetire (id);
}
@@ -1096,6 +1246,8 @@ public void retireCheckpoint (long id) throws Exception {
@Override
public void checkpoint(Checkpoint checkpoint) throws Exception {
logger.log (DEBUG_LEVEL, ">>> CHECKPOINT (ckpt id=" + checkpoint.getSequenceId() + ")"); //$NON-NLS-1$ //$NON-NLS-2$
+ final ConsumerClient consumer = consumerRef.get();
+ checkpoint.getOutputStream().writeInt (consumer.getImplementationMagic());
if (consumer.isProcessing()) {
consumer.onCheckpoint (checkpoint);
}
@@ -1108,6 +1260,42 @@ public void reset(Checkpoint checkpoint) throws Exception {
logger.log (DEBUG_LEVEL, MsgFormatter.format(">>> RESET (ckpt id/attempt={0,number,#}/{1})", sequenceId, (crContext == null? "-": "" + attempt)));
final long before = System.currentTimeMillis();
try {
+ final ObjectInputStream inputStream = checkpoint.getInputStream();
+ final int chkptMagic = inputStream.readInt();
+ logger.info ("magic read from checkpoint: " + chkptMagic);
+ ConsumerClient consumer = consumerRef.get();
+ if (chkptMagic == consumer.getImplementationMagic()) {
+ logger.info ("checkpoint fits current ConsumerClient implementation.");
+ }
+ else {
+ logger.info ("checkpoint does not fit current ConsumerClient implementation. Building matching client ...");
+ if (consumer.isProcessing()) {
+ consumer.onShutdown (SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
+ }
+ final ConsumerClientBuilder builder = magics.get (chkptMagic);
+ final ConsumerClient newClient = builder.build();
+ if (consumerRef.compareAndSet (consumer, newClient)) {
+ try {
+ newClient.startConsumer();
+ logger.info (MsgFormatter.format ("consumer client implementation {0} replaced by {1}",
+ consumer.getClass().getName(),
+ newClient.getClass().getName()));
+ }
+ catch (KafkaClientInitializationException e) {
+ logger.error(e.getLocalizedMessage(), e);
+ logger.error("root cause: " + e.getRootCause());
+ throw new KafkaOperatorResetFailedException ("consumer client replacement failed", e);
+ }
+ }
+ else {
+ if (consumerRef.get().getImplementationMagic() != chkptMagic) {
+ logger.warn ("consumer client replacement failed");
+ throw new KafkaOperatorResetFailedException ("consumer client replacement failed");
+ }
+ }
+ }
+
+ consumer = consumerRef.get();
if (consumer.isProcessing()) {
// it is up to the consumer client implementation to stop polling.
consumer.onReset (checkpoint);
@@ -1132,6 +1320,7 @@ public void resetToInitialState() throws Exception {
final int attempt = crContext == null? -1: crContext.getResetAttempt();
logger.log (DEBUG_LEVEL, MsgFormatter.format(">>> RESET TO INIT (attempt={0})", attempt));
final long before = System.currentTimeMillis();
+ final ConsumerClient consumer = consumerRef.get();
if (consumer.isProcessing()) {
// it is up to the consumer client implementation to stop polling.
consumer.onResetToInitialState();
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaOperator.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaOperator.java
index fb85c7e1..bb45af6e 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaOperator.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaOperator.java
@@ -22,6 +22,7 @@
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
@@ -45,6 +46,7 @@
import com.ibm.streams.operator.types.RString;
import com.ibm.streamsx.kafka.DataGovernanceUtil;
import com.ibm.streamsx.kafka.IGovernanceConstants;
+import com.ibm.streamsx.kafka.KafkaOperatorException;
import com.ibm.streamsx.kafka.MsgFormatter;
import com.ibm.streamsx.kafka.SystemProperties;
import com.ibm.streamsx.kafka.ToolkitInfoReader;
@@ -54,6 +56,10 @@
@Libraries({ "opt/downloaded/*", "impl/lib/*" })
public abstract class AbstractKafkaOperator extends AbstractOperator implements StateHandler {
+ public static final String CLIENT_ID_PARAM = "clientId";
+ public static final String USER_LIB_PARAM = "userLib";
+ public static final String APP_CONFIG_NAME_PARAM = "appConfigName";
+ public static final String PROPERTIES_FILE_PARAM = "propertiesFile";
public static final String SSL_DEBUG_PARAM = "sslDebug";
private static final Logger logger = Logger.getLogger(AbstractKafkaOperator.class);
@@ -71,14 +77,13 @@ public abstract class AbstractKafkaOperator extends AbstractOperator implements
protected String[] userLib;
protected String clientId = null;
- protected Class> messageType;
- protected Class> keyType;
protected ConsistentRegionContext crContext;
protected CheckpointContext chkptContext;
+ private Boolean inParallelRegion = null;
private KafkaOperatorProperties kafkaProperties;
- @Parameter(optional = true, name="propertiesFile",
+ @Parameter(optional = true, name=PROPERTIES_FILE_PARAM,
description="Specifies the name of the properties file "
+ "containing Kafka properties. A relative path is always "
+ "interpreted as relative to the *application directory* of the "
@@ -87,7 +92,7 @@ public void setPropertiesFile(String propertiesFile) {
this.propertiesFile = propertiesFile;
}
- @Parameter(optional = true, name="appConfigName",
+ @Parameter(optional = true, name=APP_CONFIG_NAME_PARAM,
description="Specifies the name of the application configuration "
+ "containing Kafka properties.")
public void setAppConfigName(String appConfigName) {
@@ -109,7 +114,7 @@ public void setSslDebug (boolean sslDebug) {
}
}
- @Parameter(optional = true, name="userLib",
+ @Parameter(optional = true, name=USER_LIB_PARAM,
description="Allows the user to specify paths to JAR files that should "
+ "be loaded into the operators classpath. This is useful if "
+ "the user wants to be able to specify their own partitioners. "
@@ -121,7 +126,7 @@ public void setUserLib(String[] userLib) {
this.userLib = userLib;
}
- @Parameter(optional = true, name="clientId",
+ @Parameter(optional = true, name=CLIENT_ID_PARAM,
description="Specifies the client ID that should be used "
+ "when connecting to the Kafka cluster. The value "
+ "specified by this parameter will override the `client.id` "
@@ -140,6 +145,15 @@ public void setClientId(String clientId) {
this.clientId = clientId;
}
+ /**
+ * Determines if the operator is used within a parallel region.
+ * @return true, if the operator is used in a parallel region, false otherwise.
+ * @throws KafkaOperatorException The operator is not yet initialized.
+ */
+ public boolean isInParallelRegion() throws KafkaOperatorException {
+ if (inParallelRegion == null) throw new KafkaOperatorException ("operator is not yet initialized");
+ return inParallelRegion.booleanValue();
+ }
@Override
public synchronized void initialize(OperatorContext context) throws Exception {
@@ -151,6 +165,10 @@ public synchronized void initialize(OperatorContext context) throws Exception {
catch (Exception e) {
logger.warn ("Could not determine toolkit name and version: " + e);
}
+ List<String> paramNames = new LinkedList<> (context.getParameterNames());
+ Collections.sort (paramNames);
+ logger.info ("Used operator parameters: " + paramNames);
+ this.inParallelRegion = Boolean.valueOf (context.getChannel() >= 0);
crContext = context.getOptionalContext (ConsistentRegionContext.class);
chkptContext = context.getOptionalContext (CheckpointContext.class);
// load the Kafka properties
@@ -252,7 +270,7 @@ protected void loadFromAppConfig() throws Exception {
Map<String, String> appConfig = getOperatorContext().getPE().getApplicationConfiguration(appConfigName);
if (appConfig.isEmpty()) {
- logger.warn(Messages.getString("APPLICATION_CONFIG_NOT_FOUND", appConfigName)); //$NON-NLS-1$
+ logger.warn(Messages.getString("APPLICATION_CONFIGURATION_NOT_FOUND", appConfigName)); //$NON-NLS-1$
return;
}
@@ -271,6 +289,12 @@ protected KafkaOperatorProperties getKafkaProperties() {
return this.kafkaProperties;
}
+ /**
+ * Converts an attribute object to the corresponding Java primitive object.
+ * @param type Not used
+ * @param attrObj the attribute value as object
+ * @return The corresponding Java primitive
+ */
protected Object toJavaPrimitveObject(Class<?> type, Object attrObj) {
if(attrObj instanceof RString) {
attrObj = ((RString)attrObj).getString();
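For illustration, a minimal SPL sketch using the parameters named by the constants above; all file paths, the app config name, and the client ID are assumptions:

    stream<rstring message> Messages = KafkaConsumer() {
        param
            propertiesFile: "etc/consumer.properties"; // resolved relative to the application directory
            appConfigName: "kafkaConsumerProps";       // application configuration holding Kafka properties
            clientId: "myConsumer";                    // overrides the client.id consumer property
            userLib: "opt/custom/*";                   // extra JARs for the operator class path
            topic: "myTopic";
    }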
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaProducerOperator.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaProducerOperator.java
index 0bf848c6..1cabda7f 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaProducerOperator.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/AbstractKafkaProducerOperator.java
@@ -33,8 +33,10 @@
import com.ibm.streams.operator.StreamingInput;
import com.ibm.streams.operator.Tuple;
import com.ibm.streams.operator.TupleAttribute;
+import com.ibm.streams.operator.Type;
import com.ibm.streams.operator.Type.MetaType;
import com.ibm.streams.operator.compile.OperatorContextChecker;
+import com.ibm.streams.operator.meta.OptionalType;
import com.ibm.streams.operator.meta.TupleType;
import com.ibm.streams.operator.metrics.Metric;
import com.ibm.streams.operator.model.CustomMetric;
@@ -44,7 +46,7 @@
import com.ibm.streams.operator.state.ConsistentRegionContext;
import com.ibm.streamsx.kafka.PerformanceLevel;
import com.ibm.streamsx.kafka.clients.producer.ConsistentRegionPolicy;
-import com.ibm.streamsx.kafka.clients.producer.KafkaProducerClient;
+import com.ibm.streamsx.kafka.clients.producer.AbstractKafkaProducerClient;
import com.ibm.streamsx.kafka.clients.producer.TrackingProducerClient;
import com.ibm.streamsx.kafka.clients.producer.TransactionalCrProducerClient;
import com.ibm.streamsx.kafka.i18n.Messages;
@@ -83,7 +85,10 @@ public abstract class AbstractKafkaProducerOperator extends AbstractKafkaOperato
protected TupleAttribute<Tuple, Long> timestampAttr;
protected List<String> topics;
- private KafkaProducerClient producer;
+ protected Class<?> messageType;
+ protected Class<?> keyType;
+
+ private AbstractKafkaProducerClient producer;
private AtomicBoolean isResetting;
private String keyAttributeName = null;
private String partitionAttributeName = null;
@@ -145,25 +150,30 @@ public void setConsistentRegionPolicy(ConsistentRegionPolicy consistentRegionPol
description = "If set to true, the operator guarantees that the order of records within "
+ "a topic partition is the same as the order of processed tuples when it comes "
+ "to retries. This implies that the operator sets the "
- + "`max.in.flight.requests.per.connection` producer property automatically to 1 "
- + "if retries are enabled, i.e. when the `retries` property is unequal 0, what "
- + "is the operator default value.\\n"
+ + "`enable.idempotence` producer config automatically to `true`, `acks` to `all`, "
+ + "enables retries, and adjusts `max.in.flight.requests.per.connection` to an upper limit of 5.\\n"
+ "\\n"
+ "If unset, the default value of this parameter is `false`, which means that the "
- + "order can change due to retries. Please be aware that setting "
+ + "order can change due to retries as long as the producer configuration "
+ + "`max.in.flight.requests.per.connection` is greater than 1.\\n"
+ + "\\n"
+ + "**Note for users of Kafka 0.10.x:**\\n"
+ + "\\n"
+ + "The idempotent producer is not supported for Kafka versions < 0.11. "
+ + "When guaranteed record order is required with older Kafka servers, users must set the producer config "
+ + "`max.in.flight.requests.per.connection=1` instead of setting **"
+ GUARANTEE_ORDERING_PARAM_NAME
- + " to `true` degrades the producer throughput as only one PRODUCE request per topic partition "
- + "is active at any time.")
+ + "** to `true`.")
public void setGuaranteeOrdering (boolean guaranteeOrdering) {
this.guaranteeOrdering = guaranteeOrdering;
}
@Parameter(optional = true, name = OUTPUT_ERRORS_ONLY_PARM_NAME,
description = "If set to `true`, the operator submits tuples to the optional output port only "
- + "for the tuples that failed to produce completely. "
+ + "for the tuples that failed to produce. "
+ "If set to `false`, the operator submits also tuples for the successfully produced input tuples.\\n"
+ "\\n"
- + "If unset, the default value of this parameter is " + O_PORT_SUBMIT_ONLY_ERRORS + ". "
+ + "If unset, the default value of this parameter is `" + O_PORT_SUBMIT_ONLY_ERRORS + "`. "
+ "This parameter is ignored when the operator is not configured with an output port.")
public void setOutputErrorsOnly (boolean errsOnly) {
this.outputErrorsOnly = errsOnly;
@@ -291,7 +301,8 @@ public static void checkErrorPortSchema (OperatorContextChecker checker) {
int nStringAttrs = 0;
for (String outAttrName: outSchema.getAttributeNames()) {
Attribute attr = outSchema.getAttribute (outAttrName);
- MetaType metaType = attr.getType().getMetaType();
+ Type attrType = attr.getType();
+ MetaType metaType = attrType.getMetaType();
switch (metaType) {
case TUPLE:
++nTupleAttrs;
@@ -305,6 +316,17 @@ public static void checkErrorPortSchema (OperatorContextChecker checker) {
case USTRING:
++nStringAttrs;
break;
+ case OPTIONAL:
+ MetaType optionalValueMeta = ((OptionalType)attrType).getValueType().getMetaType();
+ switch (optionalValueMeta) {
+ case RSTRING:
+ case USTRING:
+ ++nStringAttrs;
+ break;
+ default:
+ checker.setInvalidContext (Messages.getString("PRODUCER_INVALID_OPORT_SCHEMA", opCtx.getKind()), new Object[0]); //$NON-NLS-1$
+ }
+ break;
default:
checker.setInvalidContext (Messages.getString("PRODUCER_INVALID_OPORT_SCHEMA", opCtx.getKind()), new Object[0]); //$NON-NLS-1$
}
@@ -580,7 +602,7 @@ public synchronized void shutdown() throws Exception {
+ context.getPE().getPEId() + " in Job: " + context.getPE().getJobId()); //$NON-NLS-1$
producer.flush();
- producer.close (KafkaProducerClient.CLOSE_TIMEOUT_MS);
+ producer.close (AbstractKafkaProducerClient.CLOSE_TIMEOUT_MS);
if (this.errorPortSubmitter != null) this.errorPortSubmitter.stop();
// Must call super.shutdown()
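For illustration, a minimal SPL sketch of the revised **guaranteeOrdering** semantics described in the parameter documentation above; stream and topic names are assumptions:

    // requires Kafka 0.11 or later brokers: enables the idempotent producer
    // (enable.idempotence=true, acks=all, retries enabled,
    // max.in.flight.requests.per.connection capped at 5)
    () as KafkaSink = KafkaProducer (Data) {
        param
            propertiesFile: "etc/producer.properties";
            topic: "myTopic";
            guaranteeOrdering: true;
    }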
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaConsumerOperator.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaConsumerOperator.java
index bbfcec79..64e8e585 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaConsumerOperator.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaConsumerOperator.java
@@ -24,19 +24,46 @@
@PrimitiveOperator(name = "KafkaConsumer", namespace = "com.ibm.streamsx.kafka", description=KafkaConsumerOperator.DESC)
@InputPorts({
@InputPortSet (description = ""
- + "This port is used to specify the topic-partition offsets that the consumer should begin reading messages from. When this "
- + "port is specified, the operator will ignore the `topic`, `partition` and `startPosition` parameters. The operator will only begin "
- + "consuming messages once a tuple is received on this port. Each tuple received on this port will cause the operator to "
- + "seek to the offsets for the specified topic-partitions. This works as follows: "
+ + "This port is used to specify the topics or topic partitions that the consumer should begin reading messages from. When this "
+ + "port is specified, the `topic`, `partition` and `startPosition` parameters cannot be used. The operator will only begin "
+ + "consuming messages once a tuple is received on this port, which specifies a partition assignment or a topic subscription.\\n"
+ "\\n"
- + " * To seek to the beginning of a topic-partition, set the value of the offset to `-2.`\\n"
- + " * To seek to the end of a topic-partition, set the value of the offset attribute to `-1.`\\n"
- + " * To start fetching from the default position, omit the offset attribute or set the value of the offset to `-3`\\n"
- + " * Any other value will cause the operator to seek to that offset value. If that value does not exist, then the operator will use the "
- + "`auto.offset.reset` policy to determine where to begin reading messages from.\\n"
+ + "When the KafkaConsumer participates in a consistent region, only partition assignment via control port is supported. **The support "
+ + "of consistent region with the control port is deprecated and may be removed in next major toolkit version.**\\n"
+ + "\\n"
+ + "When a *topic subscription* is specified, the operator benefits from Kafka's group management (Kafka assigns the partitions to consume). "
+ + "When an assignment is specified in a control tuple, the operator self-assigns to the given partition(s). "
+ + "Assignments and subscriptions via control port cannot be mixed. Note, that it is not possible to use both assignment and subscription, "
+ + "it is also not possible to subscribe after a previous assignment and unassignment, and vice versa.\\n"
+ + "This input port must contain a single `rstring` attribute that takes a JSON formatted string.\\n"
+ + "\\n"
+ + "**Adding or removing a topic subscription**\\n"
+ + "\\n"
+ + "To add or remove a topic subscription, the single `rstring` attribute must contain "
+ + "a JSON string in the following format:\\n"
+ + "\\n"
+ + " {\\n"
+ + " \\\"action\\\" : \\\"ADD\\\" or \\\"REMOVE\\\",\\n"
+ + " \\\"topics\\\" : [\\n"
+ + " {\\n"
+ + " \\\"topic\\\" : \\\"topic-name\\\"\\n"
+ + " },\\n"
+ + " ...\\n"
+ + " ]\\n"
+ + " }\\n"
+ + "\\n"
+ + "The following types and convenience functions are available to aid in creating the JSON string: \\n"
+ + "\\n"
+ + "* `rstring createMessageAddTopic (rstring topic);`\\n"
+ + "* `rstring createMessageAddTopics (list topics);`\\n"
+ + "* `rstring createMessageRemoveTopic (rstring topic)`\\n"
+ + "* `rstring createMessageRemoveTopics (list topics);`\\n"
+ "\\n"
- + "This input port must contain a single `rstring` attribute. In order to add or remove a topic partition, the attribute must contain "
- + "a JSON string in the following format: \\n"
+ + "\\n"
+ + "**Adding or removing a manual partition assignment**\\n"
+ + "\\n"
+ + "To add or remove a topic partition assignment, the single `rstring` attribute must contain "
+ + "a JSON string in the following format:\\n"
+ "\\n"
+ " {\\n"
+ " \\\"action\\\" : \\\"ADD\\\" or \\\"REMOVE\\\",\\n"
@@ -50,16 +77,25 @@
+ " ]\\n"
+ " }\\n"
+ "\\n"
- + "The following types and convenience functions are available to aid in creating the messages: \\n"
+ + "The `offset` element is optional. It specifies the offset of the first record to consume from "
+ + "the topic partition. This works as follows: "
+ + "\\n"
+ + " * To seek to the beginning of a topic-partition, set the value of the offset to `-2.`\\n"
+ + " * To seek to the end of a topic-partition, set the value of the offset attribute to `-1.`\\n"
+ + " * To start fetching from the default position, omit the offset attribute or set the value of the offset to `-3`\\n"
+ + " * Any other value will cause the operator to seek to that offset value. If that value does not exist, then the operator will use the "
+ + "`auto.offset.reset` policy to determine where to begin reading messages from.\\n"
+ + "\\n"
+ + "The following types and convenience functions are available to aid in creating the JSON string: \\n"
+ "\\n"
+ "* `type Control.TopicPartition = rstring topic, int32 partition;`\\n"
+ "* `type Control.TopicPartitionOffset = rstring topic, int32 partition, int64 offset;`\\n"
- + "* `rstring addTopicPartitionMessage (rstring topic, int32 partition, int64 offset);`\\n"
- + "* `rstring addTopicPartitionMessage (rstring topic, int32 partition);`\\n"
- + "* `rstring addTopicPartitionMessage (list topicPartitionsToAdd);`\\n"
- + "* `rstring addTopicPartitionMessage (list topicPartitionsToAdd);`\\n"
- + "* `rstring removeTopicPartitionMessage (rstring topic, int32 partition);`\\n"
- + "* `rstring removeTopicPartitionMessage (list topicPartitionsToRemove);`\\n"
+ + "* `rstring createMessageRemoveTopicPartition (rstring topic, int32 partition);`\\n"
+ + "* `rstring createMessageAddTopicPartition (rstring topic, int32 partition, int64 offset);`\\n"
+ + "* `rstring createMessageAddTopicPartition (list topicPartitionsToAdd);`\\n"
+ + "* `rstring createMessageAddTopicPartition (list topicPartitionsToAdd);`\\n"
+ + "* `rstring createMessageRemoveTopicPartition (rstring topic, int32 partition);`\\n"
+ + "* `rstring createMessageRemoveTopicPartition (list topicPartitionsToRemove);`\\n"
+ "\\n"
+ "**Important Note:** This input port must not receive a final punctuation. Final markers are automatically "
+ "forwarded causing downstream operators close their input ports. When this input port receives a final marker, "
@@ -67,7 +103,7 @@
cardinality = 1, optional = true, controlPort = true)})
@OutputPorts({
@OutputPortSet(description = "This port produces tuples based on records read from the Kafka topic(s). A tuple will be output for "
- + "each record read from the Kafka topic(s).", cardinality = 1, optional = false, windowPunctuationOutputMode = WindowPunctuationOutputMode.Generating)})
+ + "each record read from the Kafka topic(s).", cardinality = 1, optional = false, windowPunctuationOutputMode = WindowPunctuationOutputMode.Free)})
@Icons(location16 = "icons/KafkaConsumer_16.gif", location32 = "icons/KafkaConsumer_32.gif")
public class KafkaConsumerOperator extends AbstractKafkaConsumerOperator {
@@ -109,6 +145,8 @@ public class KafkaConsumerOperator extends AbstractKafkaConsumerOperator {
+ "\\n"
+ KafkaSplDoc.CONSUMER_KAFKA_GROUP_MANAGEMENT
+ "\\n"
+ + KafkaSplDoc.CONSUMER_STATIC_GROUP_MEMBERSHIP
+ + "\\n"
+ KafkaSplDoc.CONSUMER_CHECKPOINTING_CONFIG
+ "\\n"
+ KafkaSplDoc.CONSUMER_RESTART_BEHAVIOUR
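For illustration, a minimal SPL sketch of the rewritten control-port contract, using the convenience functions documented above; the Beacon trigger and all names are assumptions:

    use com.ibm.streamsx.kafka::*;

    // a single control tuple that subscribes the consumer to a topic;
    // createMessageAddTopicPartition ("myTopic", 0, -2l) would instead
    // self-assign partition 0 starting at the beginning (offset -2)
    stream<rstring jsonString> Control = Beacon() {
        param
            iterations: 1u;
        output
            Control: jsonString = createMessageAddTopic ("myTopic");
    }

    // with the control port connected, the topic, pattern, partition, and
    // startPosition parameters now cause a compile error instead of being ignored
    stream<rstring message> Messages = KafkaConsumer (Control) {
        param
            propertiesFile: "etc/consumer.properties";
            groupId: "myGroup";
    }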
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaProducerOperator.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaProducerOperator.java
index e2a9a730..8dcc0780 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaProducerOperator.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaProducerOperator.java
@@ -38,8 +38,10 @@
+ "may also differ from the sequence of the input tuples. Window punctuations from the input stream are not forwarded.\\n"
+ "\\n"
+ "The schema of the output port must consist of one optional attribute of tuple type with the same schema "
- + "as the input port and one optional attribute of type `rstring` or `ustring`, that takes a JSON formatted description "
- + "of the occured error, or remains empty (zero length) for successfully produced tuples. "
+ + "as the input port and one optional attribute of type `rstring`, `ustring`, `optional`, or `optional`, "
+ + "that takes a JSON formatted description "
+ + "of the occured error, or remains *empty* for successfully produced tuples. Emptiness of the attribute means that the attribute contains a "
+ + "string with zero length when declared as `rstring` or `ustring`, and an empty optional (optional without a value) when declared as optional. "
+ "Both attributes can have any names and can be declared in any sequence in the schema.\\n"
+ "\\n"
+ "**Example for declaring the output stream as error output:**\\n"
@@ -61,8 +63,7 @@
+ "\\n"
+ "**Example for declaring the output stream for both successfully produced input tuples and failures:**\\n"
+ "\\n"
- + " // 'failure' attribute will have zero length for successfully produced input tuples\\n"
- + " stream ProduceStatus = KafkaProducer (Data as Inp) {\\n"
+ + " stream failure> ProduceStatus = KafkaProducer (Data as Inp) {\\n"
+ " param\\n"
+ " outputErrorsOnly: false;\\n"
+ " ...\\n"
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaSplDoc.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaSplDoc.java
index a9bfb2ad..f5eb2f9d 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaSplDoc.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/KafkaSplDoc.java
@@ -1,10 +1,10 @@
/*
- * Licensed under the Apache License, Version 2.0 (the "License");
+ * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this except in compliance with the License.
* You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -23,21 +23,22 @@ public class KafkaSplDoc {
+ "The operator can be configured for operator driven and periodic checkpointing. Checkpointing "
+ "is in effect when the operator is configured with an input port. When the operator has no input "
+ "port, checkpointing can be configured, but is silently ignored. The operator "
- + "checkpoints the current partition assignment, which is modified via control tuples received "
+ + "checkpoints the current partition assignment or topic subscription, which is modified via control tuples received "
+ "by the input port. The current fetch positions are not saved.\\n"
+ "\\n"
- + "On reset, the partition assignment is restored from the checkpoint. The fetch offsets will "
+ + "On reset, the partition assignment or topic subscription is restored from the checkpoint. The fetch offsets will "
+ "be the last committed offsets.\\n"
+ "\\n"
- + "With `config checkpoint: operatorDriven;` the operator creates a checkpoint when the partition "
- + "assignment changes, i.e. after each input tuple has been processed.\\n"
+ + "With `config checkpoint: operatorDriven;` the operator creates a checkpoint when the manual partition "
+ + "assignment or the topic subscription changed, i.e. after each input tuple has been processed, which "
+ + "changed the assignment or subscription of the operator.\\n"
+ "\\n"
;
public static final String CONSUMER_WHERE_TO_FIND_PROPERTIES = ""
- + "The operator implements Kafka's KafkaConsumer API of the Kafka client version 2.2.1. As a result, it supports all "
- + "Kafka configurations that are supported by the underlying API. The consumer configs for the Kafka consumer 2.2 "
- + "can be found in the [https://kafka.apache.org/22/documentation.html#consumerconfigs|Apache Kafka 2.2 documentation].\\n"
+ + "The operator implements Kafka's KafkaConsumer API of the Kafka client version 2.3.1. As a result, it supports all "
+ + "Kafka configurations that are supported by the underlying API. The consumer configs for the Kafka consumer 2.3 "
+ + "can be found in the [https://kafka.apache.org/documentation.html#consumerconfigs|Apache Kafka 2.3 documentation].\\n"
+ "\\n"
+ "When you reference files within your application, which are bundled with the Streams application bundle, for example "
+ "an SSL truststore or a key tab file for Kerberos authentication, you can use the `{applicationDir}` placeholder in the "
@@ -61,9 +62,15 @@ public class KafkaSplDoc {
+ "---\\n"
+ "| Property Name | Default Value |\\n"
+ "|===|\\n"
- + "| client.id | Generated ID in the form: `C-J-` |\\n"
+ + "| auto.commit.enable | adjusted to `false` |\\n"
+ + "|---|\\n"
+ + "| client.id | Generated ID in the form: `C-J-` when not user provided, when user provided and used in parallel region, the parallel channel number is added. |\\n"
+ + "|---|\\n"
+ + "| group.id | constructed from hashes of instance-ID, job-ID, and operator name |\\n"
+ "|---|\\n"
- + "| group.id | hash from domain-ID, instance-ID, job-ID, and operator name |\\n"
+ + "| group.instance.id | when **staticGroupMember** parameter is `true`: hashes from instance-ID and operator name. When user provided and used in a parallel region, the parallel channel number is added. |\\n"
+ + "|---|\\n"
+ + "| isolation.level | `read_committed` |\\n"
+ "|---|\\n"
+ "| key.deserializer | See **Automatic deserialization** section below |\\n"
+ "|---|\\n"
@@ -71,15 +78,13 @@ public class KafkaSplDoc {
+ "|---|\\n"
+ "| partition.assignment.strategy | **Only when multiple topics are specified:** `org.apache.kafka.clients.consumer.RoundRobinAssignor` |\\n"
+ "|---|\\n"
- + "| auto.commit.enable | adjusted to `false` |\\n"
- + "|---|\\n"
+ "| max.poll.interval.ms | adjusted to a minimum of *3 \\\\* max (reset timeout, drain timeout)* when in consistent region, 300000 otherwise |\\n"
+ "|---|\\n"
+ "| metadata.max.age.ms | adjusted to a maximum of 2000 |\\n"
+ "|---|\\n"
- + "| session.timeout.ms | adjusted to a maximum of 20000 |\\n"
+ + "| session.timeout.ms | when unset, set to max (1.2 \\\\* reset timeout, 120000) when the operator is a static consumer group member, to 20000 otherwise |\\n"
+ "|---|\\n"
- + "| request.timeout.ms | adjusted to session.timeout.ms \\\\+ 5000 |\\n"
+ + "| request.timeout.ms | adjusted to 25000 when the operator is a static consumer group member, to `session.timeout.ms` \\\\+ 5000 otherwise |\\n"
+ "|---|\\n"
+ "| metric.reporters | added to provided reporters: `com.ibm.streamsx.kafka.clients.consumer.ConsumerMetricsReporter` |\\n"
+ "|---|\\n"
@@ -130,20 +135,20 @@ public class KafkaSplDoc {
+ "are supported. If using operator driven, the **triggerCount** parameter must be set to "
+ "indicate how often the operator should initiate a consistent region.\\n"
+ "\\n"
- + "When a group-ID is specified via the consumer property `group.id` or the **groupId** parameter, the operator "
- + "participates automatically in a consumer group defined by the group ID. A consistent region can have "
- + "multiple consumer groups.\\n"
+ + "Unless the operator is configured with an input port or the **partition** parameter is used, the operator "
+ + "participates automatically in a consumer group defined by a user provided or operator generated group ID. "
+ + "A consistent region can have multiple consumer groups.\\n"
+ "\\n"
+ "**Tuple replay after reset of the consistent region**\\n"
+ "\\n"
+ "After reset of the consistent region, the operators that participate in a consumer group may replay tuples that "
+ "have been submitted by a different consumer before. The reason for this is, that the assignment of partitions to consumers "
- + "can change. This property of a consumer group must be kept in mind when combining a consumer groups with "
+ + "can change. This property of a consumer group must be kept in mind when combining consumer groups with "
+ "consistent region.\\n"
+ "\\n"
- + "When **no group-ID is specified**, the partition assignment is static (a consumer consumes "
- + "all partitions or those, which are specified), so that the consumer operator replays after consistent region "
- + "reset those tuples, which it has submitted before.\\n"
+ + "When the **partition** parameter is used or the operator has an input port, the partition assignment is static "
+ + "(a consumer consumes all partitions or those, which are specified), so that the consumer operator replays after "
+ + "consistent region reset those tuples, which it has submitted before.\\n"
+ "\\n"
+ "\\n"
+ "When the consumers of a consumer group rebalance the partition assignment, for example, immediately after job "
@@ -184,14 +189,15 @@ public class KafkaSplDoc {
+ "**1. The partitions within a consumer group are rebalanced.** Before new partitions are assigned, "
+ "the offsets of the currently assigned partitions are committed. When the partitions are re-assigned, "
+ "the operators start fetching from these committed offsets. The periodic commit controlled by the "
- + "**commitCount** or **commitPeriod** parameter is reset after rebalance.\\n"
+ + "**commitCount** or **commitPeriod** parameter is reset after rebalance. "
+ + "A partition rebalance happens every time a subscription via control port is changed.\\n"
+ "\\n"
+ "**2. Offsets are committed periodically.** The period can be a time period or a tuple count. "
+ "If nothing is specified, offsets are committed every 5 seconds. The time period can be specified with the "
+ "**commitPeriod** parameter. When the **commitCount** parameter is used with a value of N, offsets "
+ "are committed every N submitted tuples.\\n"
+ "\\n"
- + "**3. Partition assignment via control port is removed.** The offsets of those partitions which are de-assigned are committed.\\n"
+ + "**3. Partition assignment or subscription via control port is removed.** The offsets of those partitions which are de-assigned are committed.\\n"
+ "\\n"
+ "**b) The operator is part of a consistent region**\\n"
+ "\\n"
@@ -271,7 +277,7 @@ public class KafkaSplDoc {
+ "\\n"
+ "*b) The consumer operator is configured with a control input port*\\n"
+ "\\n"
- + "When the operator is configured with an input port, the partition assignments, "
+ + "When the operator is configured with an input port, the partition assignments or subscription, "
+ "which have been created via the control stream, are lost. It is therefore recommended to fuse the "
+ "consumer operator with the source of the control stream to replay the control tuples "
+ "after restart or to use a `config checkpoint` clause, preferably `operatorDriven`, "
@@ -294,15 +300,15 @@ public class KafkaSplDoc {
+ "In order for the operator to use this function, the following requirements "
+ "must be met\\n"
+ "\\n"
- + "* A `group.id` consumer property must be given. The group-ID defines which operators belong to a consumer group. When no group-ID is given, "
- + "group management will not be in place. "
+ + "* A `group.id` consumer property must be given when multiple consumer operators need to participate in the consumer group. "
+ + "The group-ID defines which operators belong to a consumer group. When no group-ID is given, the operator will create a "
+ + "unique group identifier and will be a single group member. "
+ "The `group.id` can be specified via property file, app configuration, or the **groupId** parameter.\\n"
- + "* The operator must not be configured with the optional input port.\\n"
+ + "* When in a consistent region, the operator must not be configured with the optional input port.\\n"
+ "* The **partition** parameter must not be used.\\n"
+ "\\n"
+ "The other way round, group management is inactive in following cases\\n"
- + "* when no group ID is specified, or\\n"
- + "* when the operator is configured with the optional input port, or\\n"
+ + "* when the operator is in a consistent region and is configured with the optional input port, or\\n"
+ "* when the **partition** parameter is specified.\\n"
+ "\\n"
+ "In a consistent region, a consumer group must not have consumers outside of the consistent region, "
@@ -329,6 +335,33 @@ public class KafkaSplDoc {
+ "\\n"
;
+ public static final String CONSUMER_STATIC_GROUP_MEMBERSHIP = ""
+ + "# Static Consumer Group Membership\\n"
+ + "\\n"
+ + "**To benefit from this feature, the Kafka server must be at minimum version 2.3.**\\n"
+ + "\\n"
+ + "Since version 2.3, Kafka supports [https://kafka.apache.org/documentation/#static_membership|static group membership], "
+ + "more detailed in this [https://www.confluent.io/blog/kafka-rebalance-protocol-static-membership|confluent blog post].\\n"
+ + "\\n"
+ + "Streams applications can benefit from this feature as it avoids unnecessary consumer group rebalances "
+ + "when PEs are re-launched. With dynamic group members (the traditional behavior), a consumer actively leaves the "
+ + "group when its PE is re-launched, leading to an immediate partition rebalance among the remaining consumers.\\n"
+ + "\\n"
+ + "With static group membership, a consumer leaves the group only based on the session timeout, i.e. when the broker "
+ + "does not receive the heartbeat from the consumer for longer than the session timeout. When the session timeout is "
+ + "high enough, the remaining consumers in a consumer group within an autonomous region will not be affected from the "
+ + "restart of another consumer. They will continue consuming the assigned partitions without being "
+ + "re-assigned to partitions and reset to their last committed offsets.\\n"
+ + "A consumer group within a consistent region benefits also when unnecessary rebalances are avoided because a rebalance "
+ + "always triggers a reset of the consistent region. That's why this feature can help avoid unnecessary "
+ + "consistent region resets.\\n"
+ + "\\n"
+ + "You can enable this feature by setting either the consumer config `group.instance.id` to a unique value "
+ + "within the consumer group, or by setting the **staticGroupMember** parameter to `true`, which creates a uniqe `group.instance.id`. "
+ + "When a `group.instance.id` is detected, the operator automatically increases the default session timeout when "
+ + "no user-provided session timeout is given."
+ + "";
+
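A sketch of a static group member, assuming illustrative names; the **staticGroupMember** parameter generates a unique `group.instance.id` per consumer, and the operator raises the default session timeout when none is provided:

```
@parallel (width = 3)
(stream<rstring message, rstring key> Messages) as StaticConsumer = KafkaConsumer() {
    param
        propertiesFile: "etc/consumer.properties";
        topic: "t1";
        groupId: "myConsumerGroup";
        // requires a Kafka server at minimum version 2.3
        staticGroupMember: true;
}
```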
public static final String PRODUCER_CHECKPOINTING_CONFIG = ""
+ "# Checkpointing behavior in an autonomous region\\n"
+ "\\n"
@@ -338,10 +371,10 @@ public class KafkaSplDoc {
public static final String PRODUCER_WHERE_TO_FIND_PROPERTIES = ""
- + "The operator implements Kafka's KafkaProducer API of the Kafka client version 2.2.1. As a result, "
+ + "The operator implements Kafka's KafkaProducer API of the Kafka client version 2.3.1. As a result, "
+ "it supports all Kafka properties that are supported by the "
- + "underlying API. The producer properties for the Kafka producer 2.2 "
- + "can be found in the [https://kafka.apache.org/22/documentation/#producerconfigs|Apache Kafka 2.2 documentation].\\n"
+ + "underlying API. The producer properties for the Kafka producer 2.3 "
+ + "can be found in the [https://kafka.apache.org/documentation/#producerconfigs|Apache Kafka 2.3 documentation].\\n"
+ "\\n"
+ "When you reference files within your application, which are bundled with the Streams application bundle, for example "
+ "an SSL truststore or a key tab file for Kerberos authentication, you can use the `{applicationDir}` placeholder in the "
@@ -371,19 +404,19 @@ public class KafkaSplDoc {
+ "|---|\\n"
+ "| value.serializer | See **Automatic Serialization** section below |\\n"
+ "|---|\\n"
- + "| acks | Controls the durability of records that are sent. Adjusted to `all` when in consistent region, and **consistentRegionPolicy** parameter is `Transactional`, otherwise `acks` is unchanged. The value `0` (fire and forget) is not recommended. |\\n"
+ + "| acks | Controls the durability of records that are sent. Adjusted to `all` when the **guaranteeOrdering** parameter is `true`, or when the **consistentRegionPolicy** parameter is `Transactional` in consistent region. Otherwise `acks` is unchanged. The value `0` (fire and forget) is not recommended. |\\n"
+ "|---|\\n"
- + "| retries | When `0` is provided as **retries** and **consistentRegionPolicy** parameter is `Transactional` **retries** is adjusted to `1`. |\\n"
+ + "| retries | When the **guaranteeOrdering** parameter is `true`, or when the **consistentRegionPolicy** parameter is `Transactional` in consistent region, **retries** is adjusted to a minimum of `1`. |\\n"
+ "|---|\\n"
+ "| linger.ms | `100` |\\n"
+ "|---|\\n"
+ "| batch.size | `32768` |\\n"
+ "|---|\\n"
- + "| max.in.flight.requests.per.connection | `1` when **guaranteeOrdering** parameter is `true`, limited to `5` when provided and **consistentRegionPolicy** parameter is `Transactional`, or `10` in all other cases. |\\n"
+ + "| max.in.flight.requests.per.connection | Limited to `5` when **guaranteeOrdering** parameter is `true`, or when **consistentRegionPolicy** parameter is `Transactional` in consistent region. `10` in all other cases when unset. |\\n"
+ "|---|\\n"
- + "| enable.idempotence | `true` only when in consistent region and **consistentRegionPolicy** parameter is set to `Transactional`. |\\n"
+ + "| enable.idempotence | `true` when **guaranteeOrdering** parameter is `true`, or when in consistent region and the **consistentRegionPolicy** parameter is `Transactional`. |\\n"
+ "|---|\\n"
- + "| transactional.id | Randomly generated ID in the form: `tid-` only when in consistent region and **consistentRegionPolicy** parameter is set to `Transactional`. |\\n"
+ + "| transactional.id | Randomly generated ID in the form: `tid-` only when in consistent region and the **consistentRegionPolicy** parameter is set to `Transactional`. |\\n"
+ "|---|\\n"
+ "| transaction.timeout.ms | adjusted to a minimum of `drain timeout \\\\+ 120000 milliseconds`, but not greater than `900000`. Adjusted only when in consistent region and **consistentRegionPolicy** parameter is set to `Transactional`. |\\n"
+ "|---|\\n"
@@ -402,7 +435,7 @@ public class KafkaSplDoc {
+ "\\n"
+ "**NOTE:** Although properties are adjusted, users can override any of the above properties by explicitly setting "
+ "the property value in either a properties file or in an application configuration. Not all properties or possible property values, which can be "
- + "specified for the Kafka producer version 2.2, are supported by all Broker versions. An example for is the Zstandard "
+ + "specified for the Kafka producer version 2.3, are supported by all Broker versions. An example for such a config is the Zstandard "
+ "compression algorithm, which is supported with broker version 2.1 and above.\\n"
;
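A sketch of a producer that guarantees the order of records within each partition, assuming illustrative stream and file names; setting **guaranteeOrdering** to `true` enables the idempotent producer, which adjusts `acks`, `retries`, `max.in.flight.requests.per.connection`, and `enable.idempotence` as shown in the table above:

```
() as OrderedProducer = KafkaProducer (Data) {
    param
        propertiesFile: "etc/producer.properties";
        topic: "t1";
        // not usable with Kafka 0.10.x brokers
        guaranteeOrdering: true;
}
```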
diff --git a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/OutputPortSubmitter.java b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/OutputPortSubmitter.java
index 7f5860de..1f9f7ea6 100644
--- a/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/OutputPortSubmitter.java
+++ b/com.ibm.streamsx.kafka/impl/java/src/com/ibm/streamsx/kafka/operators/OutputPortSubmitter.java
@@ -29,7 +29,9 @@
import com.ibm.streams.operator.StreamSchema;
import com.ibm.streams.operator.StreamingOutput;
import com.ibm.streams.operator.Tuple;
+import com.ibm.streams.operator.Type;
import com.ibm.streams.operator.Type.MetaType;
+import com.ibm.streams.operator.meta.OptionalType;
import com.ibm.streams.operator.meta.TupleType;
import com.ibm.streamsx.kafka.KafkaOperatorException;
import com.ibm.streamsx.kafka.clients.producer.FailureDescription;
@@ -48,6 +50,7 @@ public class OutputPortSubmitter implements TupleProcessedHook {
private final Gson gson;
private int tupleAttrIndex = -1;
private int stringAttrIndex = -1;
+ private boolean stringAttrIsOptional = false;
private final int outQueueCapacity;
private final long outQueueOfferTimeoutMillis;
private final BlockingQueue outQueue;
@@ -115,12 +118,12 @@ public OutputPortSubmitter (OperatorContext opContext, int outQueueCapacity, lon
int nStringAttrs = 0;
for (String outAttrName: outSchema.getAttributeNames()) {
Attribute attr = outSchema.getAttribute (outAttrName);
- MetaType metaType = attr.getType().getMetaType();
+ final Type attrType = attr.getType();
+ final MetaType metaType = attrType.getMetaType();
switch (metaType) {
case TUPLE:
++nTupleAttrs;
- TupleType tupleType = (TupleType) attr.getType();
- StreamSchema tupleSchema = tupleType.getTupleSchema();
+ final StreamSchema tupleSchema = ((TupleType)attrType).getTupleSchema();
if (tupleSchema.equals (inPortSchema)) {
tupleAttrIndex = attr.getIndex();
}
@@ -130,6 +133,19 @@ public OutputPortSubmitter (OperatorContext opContext, int outQueueCapacity, lon
++nStringAttrs;
stringAttrIndex = attr.getIndex();
break;
+ case OPTIONAL:
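+ // inspect the value type wrapped by the optional; only rstring and ustring value types are supported here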
+ final MetaType optionalValueMeta = ((OptionalType)attrType).getValueType().getMetaType();
+ switch (optionalValueMeta) {
+ case RSTRING:
+ case USTRING:
+ ++nStringAttrs;
+ this.stringAttrIsOptional = true;
+ this.stringAttrIndex = attr.getIndex();
+ break;
+ default:
+ trace.warn ("unsupported value type for optional attribute type in output port: " + optionalValueMeta + " for attribute '" + outAttrName + "'");
+ }
+ break;
default:
trace.warn ("unsupported attribute type in output port: " + metaType + " for attribute '" + outAttrName + "'");
}
@@ -161,7 +177,7 @@ public void start() {
@Override
public void onTupleProduced (Tuple tuple) {
if (submitOnlyErrors) return;
- enqueueOTuple (tuple, "");
+ enqueueOTuple (tuple, null);
}
/**
@@ -194,7 +210,7 @@ public void flush() {
public void reset() {
reset.set (true);
- // if (tupleSubmitter != null) tupleSubmitter.interrupt();
+ //if (tupleSubmitter != null) tupleSubmitter.interrupt();
flush();
reset.set (false);
}
@@ -208,12 +224,18 @@ private void enqueueOTuple (Tuple inTuple, String stringAttrVal) {
OutputTuple outTuple = out.newTuple();
if (tupleAttrIndex >= 0)
outTuple.assignTuple (tupleAttrIndex, inTuple);
- if (stringAttrIndex >= 0)
- outTuple.setString (stringAttrIndex, stringAttrVal);
+ if (stringAttrIndex >= 0) {
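+ // a null value marks a successfully produced tuple: empty optional when the attribute is optional, zero-length string otherwise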
+ if (stringAttrIsOptional) {
+ outTuple.setString (stringAttrIndex, stringAttrVal);
+ }
+ else {
+ outTuple.setString (stringAttrIndex, stringAttrVal == null? "": stringAttrVal);
+ }
+ }
try {
this.nQt.incrementAndGet();
if (!outQueue.offer (outTuple, outQueueOfferTimeoutMillis, TimeUnit.MILLISECONDS)) {
- trace.error ("Output port queue congested (size = " + outQueueCapacity + "). Output tuple discarded.");
+ trace.error ("Output port queue congested (qsize = " + outQueueCapacity + "). Output tuple discarded.");
// tuple NOT offered. revert the previous increment
if (this.nQt.decrementAndGet() == 0) {
synchronized (queueMonitor) {
diff --git a/com.ibm.streamsx.kafka/info.xml b/com.ibm.streamsx.kafka/info.xml
index fa6f0f2d..534d526e 100644
--- a/com.ibm.streamsx.kafka/info.xml
+++ b/com.ibm.streamsx.kafka/info.xml
@@ -6,7 +6,8 @@
The Kafka toolkit integrates IBM Streams with Apache Kafka brokers. It can be used with the following broker versions:
-* 0.10.2
+* 0.10.2 (with restrictions: `isolation.level=read_uncommitted` must be set for
+ the `KafkaConsumer`, and the **guaranteeOrdering** parameter of the `KafkaProducer` cannot be used)
* 0.11
* 1.0
* 1.1
@@ -15,13 +16,53 @@ The Kafka toolkit integrates IBM Streams with Apache Kafka brokers. It can be us
* 2.2
* 2.3
+Kafka versions below 2.3 do not support the **staticGroupMember** parameter of the `KafkaConsumer` operator.
+
# Additional information
Additional user documentation can be found in the
[https://ibmstreams.github.io/streamsx.kafka/docs/user/overview/|toolkit documentation on GitHub].
+ What's new and what has changed
-This is an overview of changes for major and minor version upgrades. For details see the [https://github.com/IBMStreams/streamsx.kafka/releases|Releases in public Github].
+This is an overview of changes for major and minor version upgrades.
+For details see the [https://github.com/IBMStreams/streamsx.kafka/releases|Releases in public Github].
+
+++ What's new in version 3.0
+
+# New features
+
+* The included Kafka client has been upgraded from version 2.2.1 to 2.3.1.
+* The schema of the output port of the `KafkaProducer` operator supports optional types for the error description.
+* The optional input port of the `KafkaConsumer` operator can be used to change the *topic subscription*, not only the *partition assignment*.
+* The **guaranteeOrdering** parameter now enables the idempotent producer when set to `true`, which permits a higher throughput by allowing more
+  in-flight requests per connection (requires Kafka server version 0.11 or higher).
+* The `KafkaConsumer` operator now enables and benefits from group management when the user does not specify a group identifier.
+* Checkpoint reset of the `KafkaConsumer` is optimized in consistent region when the consumer is the only group member.
+* The `KafkaConsumer` operator can be configured as a static consumer group member (requires Kafka server version 2.3 or higher).
+ See also the *Static Consumer Group Membership* chapter in the KafkaConsumer's documentation.
+* The `KafkaConsumer` operator now uses `read_committed` as the default `isolation.level` configuration unless the user has specified a different value.
+ In `read_committed` mode, the consumer will read only those transactional messages which have been successfully committed.
+ Messages of aborted transactions are now skipped. The consumer will continue to read non-transactional messages as before.
+ This new default setting is incompatible with Kafka 0.10.2.
+
+# Deprecated features
+
+The use of the input control port has been deprecated when the `KafkaConsumer` is used in a consistent region.
+
+# Incompatible changes
+
+* The toolkit requires at minimum Streams version 4.3.
+* The **guaranteeOrdering** parameter of the `KafkaProducer` operator is incompatible with Kafka version 0.10.x when used with value `true`.
+ The work-around for Kafka 0.10.x is given in the parameter description.
+* When the `KafkaConsumer` operator is configured with input port, the **topic**, **pattern**, **partition**, and **startPosition**
+ parameters used to be ignored in previous versions. Now an SPL compiler failure is raised when one of these parameters is used
+ together with the input port.
+* The default `isolation.level` configuration of the `KafkaConsumer` operator is incompatible with Kafka broker version 0.10.x.
+ When connecting with Kafka 0.10.x, `isolation.level=read_uncommitted` must be used for the consumer configuration.
+
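For example, a consumer connecting to a 0.10.x broker would need this line in its properties file (file location illustrative):

```
# required for Kafka 0.10.x brokers
isolation.level=read_uncommitted
```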
+++ What's new in version 2.2.1
+
+* bug fix: KafkaProducer: Lost output tuples on FinalMarker reception
++ What's new in version 2.2.1
@@ -169,8 +210,8 @@ of this toolkit with Red Hat AMQ Streams or with a Kafka cluster that is configu
can be read in the [https://ibmstreams.github.io/streamsx.kafka/docs/user/UsingRHAmqStreams/|toolkit documentation].
- 2.2.1
- 4.2.0.0
+ 3.0.0
+ 4.3.0.0
diff --git a/samples/KafkaAppConfigSample/info.xml b/samples/KafkaAppConfigSample/info.xml
index d2e28a22..7463ea5d 100644
--- a/samples/KafkaAppConfigSample/info.xml
+++ b/samples/KafkaAppConfigSample/info.xml
@@ -9,7 +9,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaAttrNameParamsSample/info.xml b/samples/KafkaAttrNameParamsSample/info.xml
index 69b8ee6c..40d80313 100644
--- a/samples/KafkaAttrNameParamsSample/info.xml
+++ b/samples/KafkaAttrNameParamsSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaBlobSample/info.xml b/samples/KafkaBlobSample/info.xml
index 3f45081f..4a82bb3d 100644
--- a/samples/KafkaBlobSample/info.xml
+++ b/samples/KafkaBlobSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaCRTransactionalProducer/info.xml b/samples/KafkaCRTransactionalProducer/info.xml
index 0aff397e..f335c937 100644
--- a/samples/KafkaCRTransactionalProducer/info.xml
+++ b/samples/KafkaCRTransactionalProducer/info.xml
@@ -9,7 +9,7 @@
com.ibm.streamsx.kafka
- [1.3.0,3.0.0)
+ [1.3.0,4.0.0)
diff --git a/samples/KafkaClientIdSample/info.xml b/samples/KafkaClientIdSample/info.xml
index 7d32d13d..8218f3ea 100644
--- a/samples/KafkaClientIdSample/info.xml
+++ b/samples/KafkaClientIdSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaConsumerGroupInputPortSample/.gitignore b/samples/KafkaConsumerGroupInputPortSample/.gitignore
new file mode 100644
index 00000000..bf480adf
--- /dev/null
+++ b/samples/KafkaConsumerGroupInputPortSample/.gitignore
@@ -0,0 +1,3 @@
+/.apt_generated/
+/.classpath
+/.settings/org.eclipse*
\ No newline at end of file
diff --git a/samples/KafkaConsumerGroupInputPortSample/.project b/samples/KafkaConsumerGroupInputPortSample/.project
new file mode 100644
index 00000000..8448572b
--- /dev/null
+++ b/samples/KafkaConsumerGroupInputPortSample/.project
@@ -0,0 +1,29 @@
+
+
+ KafkaConsumerGroupInputPortSample
+
+
+
+
+
+ org.eclipse.jdt.core.javabuilder
+
+
+
+
+ org.eclipse.xtext.ui.shared.xtextBuilder
+
+
+
+
+ com.ibm.streams.studio.splproject.builder.SPLProjectBuilder
+
+
+
+
+
+ org.eclipse.xtext.ui.shared.xtextNature
+ com.ibm.streams.studio.splproject.SPLProjectNature
+ org.eclipse.jdt.core.javanature
+
+
diff --git a/samples/KafkaConsumerGroupInputPortSample/.settings/com.ibm.streamsx.kafka.sample.KafkaConsumerGroupInputPortSample-BuildConfig.splbuild b/samples/KafkaConsumerGroupInputPortSample/.settings/com.ibm.streamsx.kafka.sample.KafkaConsumerGroupInputPortSample-BuildConfig.splbuild
new file mode 100644
index 00000000..b709f86a
--- /dev/null
+++ b/samples/KafkaConsumerGroupInputPortSample/.settings/com.ibm.streamsx.kafka.sample.KafkaConsumerGroupInputPortSample-BuildConfig.splbuild
@@ -0,0 +1,25 @@
+
+
+
+SPL Build Configuration: BuildConfig
+F
+T
+FDEF
+com.ibm.streamsx.kafka.sample::KafkaConsumerGroupInputPortSample
+BuildConfig
+
+
+T
+
+
+
+
+F
+T
+
+F
+
+BuildConfig
+
+F
+
\ No newline at end of file
diff --git a/samples/KafkaConsumerGroupInputPortSample/Makefile b/samples/KafkaConsumerGroupInputPortSample/Makefile
new file mode 100644
index 00000000..4f22794b
--- /dev/null
+++ b/samples/KafkaConsumerGroupInputPortSample/Makefile
@@ -0,0 +1,24 @@
+.PHONY: all clean
+
+COMPOSITE_NAME = KafkaConsumerGroupInputPortSample
+SPL_NAMESPACE = com.ibm.streamsx.kafka.sample
+SPL_MAIN_COMPOSITE = $(SPL_NAMESPACE)::$(COMPOSITE_NAME)
+OUTPUT_DIR = output/$(SPL_NAMESPACE).$(COMPOSITE_NAME)
+
+SPLC_FLAGS = -a
+SPLC = $(STREAMS_INSTALL)/bin/sc
+SPL_PATH = ../..:$(STREAMS_INSTALL)/toolkits/com.ibm.streamsx.kafka
+
+all:
+ if [ -x ../../gradlew ]; then \
+ ../../gradlew build; \
+ else \
+ $(SPLC) $(SPLC_FLAGS) -M $(SPL_MAIN_COMPOSITE) --output-directory $(OUTPUT_DIR) -t $(SPL_PATH); \
+ fi
+
+clean:
+ if [ -x ../../gradlew ]; then \
+ ../../gradlew clean; \
+ else \
+ $(SPLC) $(SPLC_FLAGS) -C -M $(SPL_MAIN_COMPOSITE) --output-directory $(OUTPUT_DIR); \
+ fi
diff --git a/samples/KafkaConsumerGroupInputPortSample/README.md b/samples/KafkaConsumerGroupInputPortSample/README.md
new file mode 100644
index 00000000..5afe95a6
--- /dev/null
+++ b/samples/KafkaConsumerGroupInputPortSample/README.md
@@ -0,0 +1,39 @@
+# Kafka consumer group with input port
+
+This sample demonstrates how to create a consumer group that subscribes via the control input port.
+The KafkaConsumer operator is configured with operator driven checkpointing.
+
+To make this sample work, these preconditions must be met:
+* The Streams instance or domain must be configured with a checkpoint repository.
+
+
+### Setup
+
+Make sure that either the properties
+```
+instance.checkpointRepository
+instance.checkpointRepositoryConfiguration
+```
+or
+```
+domain.checkpointRepository
+domain.checkpointRepositoryConfiguration
+```
+have valid values. For example, if you use a local redis server, you can set the properties to following values:
+```
+instance.checkpointRepository=redis
+instance.checkpointRepositoryConfiguration={ "replicas" : 1, "shards" : 1, "replicaGroups" : [ { "servers" : ["localhost:6379"], "description" : "localhost" } ] }
+```
+Use the commands `streamtool getproperty -a | grep checkpoint` and `streamtool getdomainproperty -a | grep checkpoint` to see the current values.
+
+In the Kafka broker the topics `t1` and `t2` must be created with at least two partitions each.
+To run this sample, replace `` in the `etc/consumer.properties` file with the Kafka brokers that you wish to connect to.
+Here is an example of what this may look like:
+
+```
+bootstrap.servers=mybroker1:9191,mybroker2:9192,mybroker3:9193
+```
+
+Compile the sample with `make` or `gradle` and submit the job with
+`streamtool submitjob ./output/com.ibm.streamsx.kafka.sample.KafkaConsumerGroupInputPortSample/com.ibm.streamsx.kafka.sample.KafkaConsumerGroupInputPortSample.sab`.
+Don't forget to rebuild the application when you change Kafka properties in the consumer's property file, because they are included in the application's bundle file.
diff --git a/samples/KafkaConsumerGroupInputPortSample/build.gradle b/samples/KafkaConsumerGroupInputPortSample/build.gradle
new file mode 100644
index 00000000..fdeb9f05
--- /dev/null
+++ b/samples/KafkaConsumerGroupInputPortSample/build.gradle
@@ -0,0 +1,25 @@
+apply from: "${rootDir}/common.gradle"
+apply plugin: 'java'
+
+def toolkitPath = project(':com.ibm.streamsx.kafka').projectDir
+def namespace = "com.ibm.streamsx.kafka.sample"
+def mainComp = "KafkaConsumerGroupInputPortSample"
+
+dependencies {
+ compile project(':com.ibm.streamsx.kafka')
+}
+
+task compile() {
+ doLast {
+ compileApp(namespace, mainComp, toolkitPath)
+ }
+}
+
+task cleanIt() {
+ doLast {
+ cleanApp(namespace, mainComp)
+ }
+}
+
+build.dependsOn compile
+clean.dependsOn cleanIt
diff --git a/samples/KafkaConsumerGroupInputPortSample/com.ibm.streamsx.kafka.sample/.namespace b/samples/KafkaConsumerGroupInputPortSample/com.ibm.streamsx.kafka.sample/.namespace
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/KafkaConsumerGroupInputPortSample/com.ibm.streamsx.kafka.sample/KafkaConsumerGroupInputPortSample.spl b/samples/KafkaConsumerGroupInputPortSample/com.ibm.streamsx.kafka.sample/KafkaConsumerGroupInputPortSample.spl
new file mode 100644
index 00000000..8f422498
--- /dev/null
+++ b/samples/KafkaConsumerGroupInputPortSample/com.ibm.streamsx.kafka.sample/KafkaConsumerGroupInputPortSample.spl
@@ -0,0 +1,87 @@
+namespace com.ibm.streamsx.kafka.sample ;
+
+use com.ibm.streamsx.kafka::* ;
+
+composite KafkaConsumerGroupInputPortSample {
+
+ graph
+ // ================ producers that continuously populate the topics t1 and t2
+ stream Data1 = Beacon() {
+ param
+ period: 0.1;
+ initDelay: 40.0;
+ iterations: 2000;
+ output Data1: key = "key-" + (rstring)random(), message = "t1 message no. " + (rstring)IterationCount();
+ }
+
+ () as ProducerT1 = KafkaProducer (Data1) {
+ param
+ propertiesFile: "etc/consumer.properties" ;
+ topic: "t1";
+ }
+
+ stream Data2 = Beacon() {
+ param
+ period: 0.1;
+ initDelay: 40.0;
+ iterations: 2000;
+ output Data2: key = "key-" + (rstring)random(), message = "t2 message no. " + (rstring)IterationCount();
+ }
+
+ () as ProducerT2 = KafkaProducer (Data2) {
+ param
+ propertiesFile: "etc/consumer.properties" ;
+ topic: "t2";
+ }
+
+ // ================= control stream for the consumer
+ (stream TopicSubscriptions) as
+ TopicUpdater = Custom() {
+ logic
+ onProcess: {
+ block(50f);
+
+ // subscribe to topic t1
+ rstring subscribeTopic_t1 = createMessageAddTopic("t1");
+ submit({ jsonString = subscribeTopic_t1 }, TopicSubscriptions);
+ block(5f);
+
+ // also subscribe to topic t2
+ rstring subscribeTopic_t2 = createMessageAddTopic ("t2");
+ submit({ jsonString = subscribeTopic_t2 }, TopicSubscriptions);
+ block(60f);
+
+ // unsubscribe both topics
+ rstring unsubscribe_t1_t2 = createMessageRemoveTopics(["t1", "t2"]);
+ submit({ jsonString = unsubscribe_t1_t2 }, TopicSubscriptions);
+
+ block(15.0);
+ // subscribe again to topic t2
+ submit({ jsonString = subscribeTopic_t2 }, TopicSubscriptions);
+
+ // no further subscription changes; block forever to avoid sending the final marker.
+ while(!isShutdown()) {
+ block (10000.0);
+ }
+ }
+ }
+
+ @parallel (width = 3, broadcast = [TopicSubscriptions])
+ (stream MessageOutStream) as
+ KafkaConsumerOp = KafkaConsumer (TopicSubscriptions) {
+ param
+ propertiesFile : "etc/consumer.properties" ;
+ groupId: "myConsumerGroup";
+ config
+ checkpoint: operatorDriven;
+ placement: partitionIsolation;
+ }
+
+ () as PrintOp = Custom(MessageOutStream as inputStream) {
+ logic
+ onTuple inputStream: {
+ println(inputStream) ;
+ }
+ }
+}
+
diff --git a/samples/KafkaConsumerGroupInputPortSample/etc/consumer.properties b/samples/KafkaConsumerGroupInputPortSample/etc/consumer.properties
new file mode 100644
index 00000000..f9a561a9
--- /dev/null
+++ b/samples/KafkaConsumerGroupInputPortSample/etc/consumer.properties
@@ -0,0 +1 @@
+#bootstrap.servers=
diff --git a/samples/KafkaConsumerGroupInputPortSample/info.xml b/samples/KafkaConsumerGroupInputPortSample/info.xml
new file mode 100644
index 00000000..81a15a90
--- /dev/null
+++ b/samples/KafkaConsumerGroupInputPortSample/info.xml
@@ -0,0 +1,16 @@
+
+
+
+ KafkaConsumerGroupInputPortSample
+
+ 1.0.0
+ 4.3.0.0
+
+
+
+ com.ibm.streamsx.kafka
+ [3.0.0,4.0.0)
+
+
+
diff --git a/samples/KafkaConsumerGroupWithConsistentRegion/info.xml b/samples/KafkaConsumerGroupWithConsistentRegion/info.xml
index f7ae836a..e8a9ad83 100644
--- a/samples/KafkaConsumerGroupWithConsistentRegion/info.xml
+++ b/samples/KafkaConsumerGroupWithConsistentRegion/info.xml
@@ -9,7 +9,7 @@
com.ibm.streamsx.kafka
- [1.5.0,3.0.0)
+ [1.5.0,4.0.0)
diff --git a/samples/KafkaConsumerInputPortSample/.gitignore b/samples/KafkaConsumerInputPortSample/.gitignore
new file mode 100644
index 00000000..c71ea97a
--- /dev/null
+++ b/samples/KafkaConsumerInputPortSample/.gitignore
@@ -0,0 +1 @@
+/.apt_generated/
diff --git a/samples/KafkaConsumerInputPortSample/com.ibm.streamsx.kafka.sample/KafkaConsumerInputPortSample.spl b/samples/KafkaConsumerInputPortSample/com.ibm.streamsx.kafka.sample/KafkaConsumerInputPortSample.spl
index dca91d72..60a1f4ac 100644
--- a/samples/KafkaConsumerInputPortSample/com.ibm.streamsx.kafka.sample/KafkaConsumerInputPortSample.spl
+++ b/samples/KafkaConsumerInputPortSample/com.ibm.streamsx.kafka.sample/KafkaConsumerInputPortSample.spl
@@ -15,7 +15,7 @@ composite KafkaConsumerInputPortSample
() as ProducerT1 = KafkaProducer (Data1) {
param
- propertiesFile : getThisToolkitDir() + "/etc/consumer.properties" ;
+ propertiesFile : "etc/consumer.properties" ;
partitionAttribute: partitionNo;
topic: "t1";
}
@@ -28,7 +28,7 @@ composite KafkaConsumerInputPortSample
() as ProducerT2 = KafkaProducer (Data2) {
param
- propertiesFile : getThisToolkitDir() + "/etc/consumer.properties" ;
+ propertiesFile : "etc/consumer.properties" ;
partitionAttribute: partitionNo;
topic: "t2";
}
@@ -75,7 +75,7 @@ composite KafkaConsumerInputPortSample
rstring topic, int32 partition, int64 offset> MessageOutStream) as
KafkaConsumerOp = KafkaConsumer(TopicPartitionUpdateStream) {
param
- propertiesFile : getThisToolkitDir() + "/etc/consumer.properties" ;
+ propertiesFile : "etc/consumer.properties" ;
config checkpoint: operatorDriven;
}
diff --git a/samples/KafkaConsumerInputPortSample/info.xml b/samples/KafkaConsumerInputPortSample/info.xml
index bd0d6308..3484a687 100644
--- a/samples/KafkaConsumerInputPortSample/info.xml
+++ b/samples/KafkaConsumerInputPortSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.1.1,3.0.0)
+ [1.1.1,4.0.0)
diff --git a/samples/KafkaConsumerLoadSample/info.xml b/samples/KafkaConsumerLoadSample/info.xml
index 2b0bfa9c..1ac1377c 100644
--- a/samples/KafkaConsumerLoadSample/info.xml
+++ b/samples/KafkaConsumerLoadSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaFloatSample/info.xml b/samples/KafkaFloatSample/info.xml
index 17857ffc..fc1a9fe5 100644
--- a/samples/KafkaFloatSample/info.xml
+++ b/samples/KafkaFloatSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaIntegerSample/info.xml b/samples/KafkaIntegerSample/info.xml
index 60ad2f77..e947b291 100644
--- a/samples/KafkaIntegerSample/info.xml
+++ b/samples/KafkaIntegerSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaJAASConfigSample/info.xml b/samples/KafkaJAASConfigSample/info.xml
index 6ea55cf9..5174f1f5 100644
--- a/samples/KafkaJAASConfigSample/info.xml
+++ b/samples/KafkaJAASConfigSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaProducerCustomPartitioner/com.ibm.streamsx.kafka.sample/KafkaProducerCustomPartitioner.spl b/samples/KafkaProducerCustomPartitioner/com.ibm.streamsx.kafka.sample/KafkaProducerCustomPartitioner.spl
index df2541a2..20e00669 100644
--- a/samples/KafkaProducerCustomPartitioner/com.ibm.streamsx.kafka.sample/KafkaProducerCustomPartitioner.spl
+++ b/samples/KafkaProducerCustomPartitioner/com.ibm.streamsx.kafka.sample/KafkaProducerCustomPartitioner.spl
@@ -40,7 +40,7 @@ composite KafkaProducerCustomPartitioner
param
topic : "test" ;
propertiesFile : "etc/producer.properties";
- userLib : getThisToolkitDir() + "/etc/custom-partitioner.jar";
+ userLib : "etc/custom-partitioner.jar";
}
(stream Beacon_4_out0) as Beacon_4 = Beacon()
diff --git a/samples/KafkaProducerCustomPartitioner/info.xml b/samples/KafkaProducerCustomPartitioner/info.xml
index 95ddea12..1a8f3775 100644
--- a/samples/KafkaProducerCustomPartitioner/info.xml
+++ b/samples/KafkaProducerCustomPartitioner/info.xml
@@ -9,7 +9,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaPublishToPartitionSample/info.xml b/samples/KafkaPublishToPartitionSample/info.xml
index 172bc65a..e1b55a86 100644
--- a/samples/KafkaPublishToPartitionSample/info.xml
+++ b/samples/KafkaPublishToPartitionSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaSample/info.xml b/samples/KafkaSample/info.xml
index 4c947d59..891c5eea 100644
--- a/samples/KafkaSample/info.xml
+++ b/samples/KafkaSample/info.xml
@@ -10,7 +10,7 @@
com.ibm.streamsx.kafka
- [1.0.0,3.0.0)
+ [1.0.0,4.0.0)
diff --git a/samples/KafkaStartOffsetSample/info.xml b/samples/KafkaStartOffsetSample/info.xml
index ce74ea04..a4886af0 100644
--- a/samples/KafkaStartOffsetSample/info.xml
+++ b/samples/KafkaStartOffsetSample/info.xml
@@ -9,7 +9,7 @@
com.ibm.streamsx.kafka
- [1.1.1,3.0.0)
+ [1.1.1,4.0.0)
diff --git a/tests/KafkaTests/.classpath b/tests/KafkaTests/.classpath
index 151e6e55..a7eeb063 100644
--- a/tests/KafkaTests/.classpath
+++ b/tests/KafkaTests/.classpath
@@ -15,9 +15,9 @@
-
+
-
+
@@ -35,24 +35,24 @@
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/AbstractKafkaTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/AbstractKafkaTest.java
index 353f4b74..92eb072d 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/AbstractKafkaTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/AbstractKafkaTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.io.File;
@@ -8,21 +21,41 @@
abstract class AbstractKafkaTest {
- private Topology topo;
+ private final Topology topology;
+ private final String testName;
- public AbstractKafkaTest(String testName) throws Exception {
- topo = createTopology(testName);
+ public AbstractKafkaTest() throws Exception {
+ this.testName = this.getClass().getName().replace(this.getClass().getPackage().getName() + ".", "");
+ topology = createTopology (this.testName);
}
- protected Topology createTopology(String testName) throws Exception {
- Topology t = new Topology(testName);
+ /**
+ * creates a Topology object with added toolkit and property file as file dependency.
+ * @param name the name of the topology
+ * @return the topology instance
+ * @throws Exception
+ */
+ protected Topology createTopology (String name) throws Exception {
+ Topology t = new Topology(name);
t.addFileDependency(Constants.PROPERTIES_FILE_PATH, "etc");
SPL.addToolkit(t, new File("../../com.ibm.streamsx.kafka"));
return t;
}
+ /**
+ * Gets the name of the test case. This is part of the Main composite name.
+ * @return the testName
+ */
+ public String getTestName() {
+ return testName;
+ }
+
+ /**
+ * Gets the Topology instance that is created for the test case
+ * @return the Topology instance
+ */
public Topology getTopology() {
- return topo;
+ return topology;
}
}
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaConsumerFanInTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaConsumerFanInTest.java
index 68a71916..81c5bfa7 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaConsumerFanInTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaConsumerFanInTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.Arrays;
@@ -33,10 +46,8 @@
*/
public class KafkaConsumerFanInTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaConsumerFanInTest";
-
public KafkaConsumerFanInTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -63,8 +74,8 @@ public void kafkaFanInTest() throws Exception {
String[] expectedArr = KafkaSPLStreamsUtils.duplicateArrayEntries(Constants.STRING_DATA, 2);
Condition> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), expectedArr);
HashMap config = new HashMap<>();
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
-// config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
// check the results
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaConsumerPatternSubscribeTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaConsumerPatternSubscribeTest.java
index 2ff36556..d6d590a2 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaConsumerPatternSubscribeTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaConsumerPatternSubscribeTest.java
@@ -1,5 +1,15 @@
-/**
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
*
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
package com.ibm.streamsx.kafka.test;
@@ -32,12 +42,11 @@
* - topics other1 and other2 are created
*/
public class KafkaConsumerPatternSubscribeTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaConsumerPatternSubscribeTest";
public KafkaConsumerPatternSubscribeTest() throws Exception {
- super(TEST_NAME);
+ super();
}
-
+
@Test
public void kafkaPatternSubscribeTest() throws Exception {
Topology topo = getTopology();
@@ -58,8 +67,8 @@ public void kafkaPatternSubscribeTest() throws Exception {
String[] expectedArr = KafkaSPLStreamsUtils.duplicateArrayEntries(Constants.STRING_DATA, 2);
Condition> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), expectedArr);
HashMap config = new HashMap<>();
-// config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
@@ -75,7 +84,7 @@ private Map getProducerKafkaParams() {
return params;
}
-
+
private Map getConsumerKafkaParams() {
Map params = new HashMap();
params.put("pattern", "other[12]{1}");
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsAttrNameParamsTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsAttrNameParamsTest.java
index d3de1044..aa7d1fd9 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsAttrNameParamsTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsAttrNameParamsTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -12,7 +25,6 @@
import com.ibm.streams.operator.StreamSchema;
import com.ibm.streamsx.kafka.test.utils.Constants;
import com.ibm.streamsx.kafka.test.utils.Delay;
-import com.ibm.streamsx.kafka.test.utils.KafkaSPLStreamsUtils;
import com.ibm.streamsx.topology.TStream;
import com.ibm.streamsx.topology.Topology;
import com.ibm.streamsx.topology.context.StreamsContext;
@@ -35,7 +47,6 @@
*/
public class KafkaOperatorsAttrNameParamsTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsAttrNameParamsTest";
private static final String PROD_KEY_ATTR_NAME = "myProdKey";
private static final String PROD_MSG_ATTR_NAME = "myProdMsg";
private static final String PROD_TOPIC_ATTR_NAME = "myProdTopic";
@@ -49,7 +60,7 @@ public class KafkaOperatorsAttrNameParamsTest extends AbstractKafkaTest {
private static final String MSG = "myMsg";
public KafkaOperatorsAttrNameParamsTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -89,8 +100,8 @@ public void kafkaAttrNameParamsTest() throws Exception {
Tester tester = topo.getTester();
Condition> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), Constants.TOPIC_TEST + ":" + KEY + ":" + MSG);
HashMap config = new HashMap<>();
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
-// config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
// check the results
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsBlobTypeTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsBlobTypeTest.java
index 4c0f0d45..2b505885 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsBlobTypeTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsBlobTypeTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -37,10 +50,8 @@
*/
public class KafkaOperatorsBlobTypeTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsBlobTypeTest";
-
public KafkaOperatorsBlobTypeTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -67,8 +78,8 @@ public void kafkaBlobTypeTest() throws Exception {
Tester tester = topo.getTester();
Condition> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), Constants.STRING_DATA);
HashMap config = new HashMap<>();
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
-// config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
// check the results
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsDoubleTypeTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsDoubleTypeTest.java
index 1a00f31f..919a7fb8 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsDoubleTypeTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsDoubleTypeTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -15,7 +28,6 @@
import com.ibm.streamsx.kafka.test.utils.KafkaSPLStreamsUtils;
import com.ibm.streamsx.topology.TStream;
import com.ibm.streamsx.topology.Topology;
-import com.ibm.streamsx.topology.context.ContextProperties;
import com.ibm.streamsx.topology.context.StreamsContext;
import com.ibm.streamsx.topology.context.StreamsContext.Type;
import com.ibm.streamsx.topology.context.StreamsContextFactory;
@@ -36,11 +48,10 @@
*/
public class KafkaOperatorsDoubleTypeTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsDoubleTypeTest";
private static final String[] DATA = {"10.1", "20.2", "30.3", "40.4", "50.5"};
public KafkaOperatorsDoubleTypeTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -64,8 +75,8 @@ public void kafkaDoubleTypeTest() throws Exception {
Tester tester = topo.getTester();
Condition> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), DATA);
HashMap config = new HashMap<>();
-// config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsFloatTypeTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsFloatTypeTest.java
index 0e29cf59..3b1eee42 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsFloatTypeTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsFloatTypeTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
@@ -36,11 +49,10 @@
*/
public class KafkaOperatorsFloatTypeTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsFloatTypeTest";
private static final String[] DATA = {"10.1", "20.2", "30.3", "40.4", "50.5"};
public KafkaOperatorsFloatTypeTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -67,8 +79,8 @@ public void kafkaFloatTypeTest() throws Exception {
Tester tester = topo.getTester();
Condition> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), DATA);
HashMap config = new HashMap<>();
-// config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsGreenThread.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsGreenThread.java
index 99fedf7d..badacfb5 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsGreenThread.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsGreenThread.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -13,7 +26,6 @@
import com.ibm.streamsx.kafka.test.utils.KafkaSPLStreamsUtils;
import com.ibm.streamsx.topology.TStream;
import com.ibm.streamsx.topology.Topology;
-import com.ibm.streamsx.topology.context.ContextProperties;
import com.ibm.streamsx.topology.context.StreamsContext;
import com.ibm.streamsx.topology.context.StreamsContext.Type;
import com.ibm.streamsx.topology.context.StreamsContextFactory;
@@ -33,10 +45,8 @@
*/
public class KafkaOperatorsGreenThread extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsGreenThread";
-
public KafkaOperatorsGreenThread() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -57,8 +67,8 @@ public void kafkaGreenThread() throws Exception {
Tester tester = topo.getTester();
Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), Constants.STRING_DATA);
HashMap<String, Object> config = new HashMap<>();
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
-// config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
// check the results
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsIntTypeTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsIntTypeTest.java
index cbf451cb..e6ae4ae1 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsIntTypeTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsIntTypeTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -35,11 +48,10 @@
*/
public class KafkaOperatorsIntTypeTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsIntTypeTest";
private static final String[] DATA = {"10", "20", "30", "40", "50"};
public KafkaOperatorsIntTypeTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -66,8 +78,8 @@ public void kafkaIntTypeTest() throws Exception {
Tester tester = topo.getTester();
Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), DATA);
HashMap<String, Object> config = new HashMap<>();
-// config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsLongTypeTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsLongTypeTest.java
index ad47b0c6..1b862383 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsLongTypeTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsLongTypeTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -35,11 +48,10 @@
*/
public class KafkaOperatorsLongTypeTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsLongTypeTest";
private static final String[] DATA = {"10", "20", "30", "40", "50"};
public KafkaOperatorsLongTypeTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -62,8 +74,8 @@ public void kafkaLongTypeTest() throws Exception {
Tester tester = topo.getTester();
Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), DATA);
HashMap<String, Object> config = new HashMap<>();
-// config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsMultipleTopics.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsMultipleTopics.java
index ce1fcbfa..db00b375 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsMultipleTopics.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsMultipleTopics.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.Arrays;
@@ -33,10 +46,8 @@
*/
public class KafkaOperatorsMultipleTopics extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsMultipleTopics";
-
public KafkaOperatorsMultipleTopics() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -59,8 +70,8 @@ public void kafkaMultipleTopicsTest() throws Exception {
String[] expectedArr = KafkaSPLStreamsUtils.duplicateArrayEntries(Constants.STRING_DATA, 3);
Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), expectedArr);
HashMap<String, Object> config = new HashMap<>();
-// config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsNoKey.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsNoKey.java
index fde8dd1e..cbfc8723 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsNoKey.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsNoKey.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -32,10 +45,8 @@
*/
public class KafkaOperatorsNoKey extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsNoKey";
-
public KafkaOperatorsNoKey() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -57,8 +68,8 @@ public void kafkaNoKeyTest() throws Exception {
Tester tester = topo.getTester();
Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), Constants.STRING_DATA);
HashMap<String, Object> config = new HashMap<>();
-// config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put (ContextProperties.KEEP_ARTIFACTS, new Boolean (true));
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsStartPositionTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsStartPositionTest.java
index e62c48fa..84e09937 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsStartPositionTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsStartPositionTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.math.BigInteger;
@@ -33,14 +46,12 @@
*/
public class KafkaOperatorsStartPositionTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsStartPositionTest";
-
public enum StartPosition {
Beginning;
}
public KafkaOperatorsStartPositionTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -84,9 +95,9 @@ public void kafkaStartPositionTest() throws Exception {
Tester tester = topo.getTester();
Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), Constants.STRING_DATA);
HashMap<String, Object> config = new HashMap<>();
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
-// config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
-
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
+
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
// check the results
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsTopicPartitionTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsTopicPartitionTest.java
index 644c2029..19c525f0 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsTopicPartitionTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaOperatorsTopicPartitionTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -39,11 +52,10 @@
*/
public class KafkaOperatorsTopicPartitionTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaOperatorsTopicPartitionTest";
private static final StreamSchema SCHEMA = com.ibm.streams.operator.Type.Factory.getStreamSchema("tuple<rstring message, int32 partition>");
public KafkaOperatorsTopicPartitionTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -74,9 +86,9 @@ public void kafkaTopicPartitionTest() throws Exception {
String[] expectedArr = {"A0", "B1", "C2", "A3", "B4", "C5", "A6", "B7", "C8"};
Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), expectedArr);
HashMap<String, Object> config = new HashMap<>();
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
-// config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
-
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
+
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
// check the results
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerFanOutTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerFanOutTest.java
index 9a7f46cd..ff93e12c 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerFanOutTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerFanOutTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -32,10 +45,8 @@
*/
public class KafkaProducerFanOutTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaProducerFanOutTest";
-
public KafkaProducerFanOutTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -63,9 +74,9 @@ public void kafkaFanOutTest() throws Exception {
String[] expectedArr = KafkaSPLStreamsUtils.duplicateArrayEntries(Constants.STRING_DATA, 2);
Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), expectedArr);
HashMap<String, Object> config = new HashMap<>();
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
-// config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
-
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
+
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
// check the results
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerOPortSchemaTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerOPortSchemaTest.java
new file mode 100644
index 00000000..e3a6c3d8
--- /dev/null
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerOPortSchemaTest.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.ibm.streamsx.kafka.test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.ibm.streams.operator.OutputTuple;
+import com.ibm.streams.operator.StreamSchema;
+import com.ibm.streamsx.kafka.test.utils.Constants;
+import com.ibm.streamsx.kafka.test.utils.Message;
+import com.ibm.streamsx.topology.TStream;
+import com.ibm.streamsx.topology.Topology;
+import com.ibm.streamsx.topology.context.StreamsContext;
+import com.ibm.streamsx.topology.context.StreamsContext.Type;
+import com.ibm.streamsx.topology.context.StreamsContextFactory;
+import com.ibm.streamsx.topology.function.BiFunction;
+import com.ibm.streamsx.topology.function.Supplier;
+import com.ibm.streamsx.topology.spl.SPL;
+import com.ibm.streamsx.topology.spl.SPLStream;
+import com.ibm.streamsx.topology.spl.SPLStreams;
+import com.ibm.streamsx.topology.tester.Condition;
+import com.ibm.streamsx.topology.tester.Tester;
+
+/**
+ * Test the optional output port of the KafkaProducer with different SPL schemas.
+ *
+ * Test requirements:
+ * - appConfig "kafka-test" must be created on the domain
+ * - topic "test" must be created
+ */
+public class KafkaProducerOPortSchemaTest extends AbstractKafkaTest {
+
+ public static final long NUM_TUPLES = 10;
+ private static final StreamSchema PRODUCER_IN_SCHEMA = com.ibm.streams.operator.Type.Factory.getStreamSchema("tuple<int32 key, rstring message>");
+
+ /**
+ * Supplies the input data for the producer.
+ */
+ private static class MySupplier implements Supplier<Message<Integer, String>> {
+ private static final long serialVersionUID = 1L;
+ private int counter = 0;
+
+ @Override
+ public Message<Integer, String> get() {
+ int key = ++counter;
+ String message = "message " + key;
+ return new Message<Integer, String>(key, message);
+ }
+ }
+
+ /**
+ * Applies a {@link Message} object to an output tuple
+ */
+ private static class MessageConverter implements BiFunction<Message<Integer, String>, OutputTuple, OutputTuple> {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public OutputTuple apply (Message<Integer, String> msg, OutputTuple outTuple) {
+ outTuple.setInt ("key", msg.getKey());
+ outTuple.setString ("message", msg.getValue());
+ return outTuple;
+ }
+ }
+
+ public KafkaProducerOPortSchemaTest() throws Exception {
+ super();
+ }
+
+ @Test
+ public void kafkaProducerOPortRstringTest() throws Exception {
+ final StreamSchema oSchema = com.ibm.streams.operator.Type.Factory.getStreamSchema(
+ "tuple inTuple, rstring failureDescription>"
+ );
+ doTestWithSPLSchema (oSchema);
+ }
+
+ @Test
+ public void kafkaProducerOPortUstringTest() throws Exception {
+ final StreamSchema oSchema = com.ibm.streams.operator.Type.Factory.getStreamSchema(
+ "tuple inTuple, ustring failureDescription>"
+ );
+ doTestWithSPLSchema (oSchema);
+ }
+
+ @Test
+ public void kafkaProducerOPortOptionalRstringTest() throws Exception {
+ final StreamSchema oSchema = com.ibm.streams.operator.Type.Factory.getStreamSchema(
+ "tuple inTuple, optional failureDescription>"
+ );
+ doTestWithSPLSchema (oSchema);
+ }
+
+ @Test
+ public void kafkaProducerOPortOptionalUstringTest() throws Exception {
+ final StreamSchema oSchema = com.ibm.streams.operator.Type.Factory.getStreamSchema(
+ "tuple inTuple, optional failureDescription>"
+ );
+ doTestWithSPLSchema (oSchema);
+ }
+
+
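+ /**
+ * Builds the test topology for one output port schema: generates NUM_TUPLES
+ * messages, feeds them into the KafkaProducer with the given output schema,
+ * and expects one status tuple per produced tuple within the timeout.
+ */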
+ protected void doTestWithSPLSchema (StreamSchema splOutSchema) throws Exception {
+ Topology topo = createTopology (getTestName());
+
+ // data generator
+ TStream<Message<Integer, String>> src = topo.limitedSource (new MySupplier(), NUM_TUPLES);
+ SPLStream outStream = SPLStreams.convertStream (src, new MessageConverter(), PRODUCER_IN_SCHEMA);
+ // create producer
+ SPLStream statusStream = SPL.invokeOperator ("Producer", Constants.KafkaProducerOp, outStream, splOutSchema, getKafkaProducerParams());
+ StreamsContext<?> context = StreamsContextFactory.getStreamsContext (Type.DISTRIBUTED_TESTER);
+ Tester tester = topo.getTester();
+ Condition<Long> numExpectedStatusTuples = tester.tupleCount (statusStream, NUM_TUPLES);
+ HashMap<String, Object> config = new HashMap<>();
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
+ tester.complete (context, config, numExpectedStatusTuples, 60, TimeUnit.SECONDS);
+
+ // check the results
+ Assert.assertTrue (numExpectedStatusTuples.valid());
+ Assert.assertTrue (numExpectedStatusTuples.getResult() == NUM_TUPLES);
+ }
+
+
+ private Map<String, Object> getKafkaProducerParams() {
+ Map<String, Object> params = new HashMap<>();
+ params.put("topic", Constants.TOPIC_TEST);
+ params.put("appConfigName", Constants.APP_CONFIG);
+ params.put("outputErrorsOnly", false);
+ return params;
+ }
+}
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerPartitionAttrTest.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerPartitionAttrTest.java
index acc8391a..35079579 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerPartitionAttrTest.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/KafkaProducerPartitionAttrTest.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test;
import java.util.HashMap;
@@ -36,14 +49,13 @@
*/
public class KafkaProducerPartitionAttrTest extends AbstractKafkaTest {
- private static final String TEST_NAME = "KafkaProducerPartitionAttrTest";
private static final StreamSchema CONSUMER_SCHEMA = com.ibm.streams.operator.Type.Factory.getStreamSchema("tuple<rstring message>");
private static final StreamSchema PRODUCER_SCHEMA = com.ibm.streams.operator.Type.Factory.getStreamSchema("tuple<rstring message, int32 partition>");
private static final Integer PARTITION_NUM = 1;
public KafkaProducerPartitionAttrTest() throws Exception {
- super(TEST_NAME);
+ super();
}
@Test
@@ -69,9 +81,9 @@ public void kafkaProducerPartitionAttrTest() throws Exception {
String[] expectedArr = {"A0", "B1", "C2", "A3", "B4", "C5", "A6", "B7", "C8"};
Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered (msgStream.toStringStream(), expectedArr);
HashMap<String, Object> config = new HashMap<>();
-// config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
-// config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
-
+ // config.put (ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
+ // config.put(ContextProperties.KEEP_ARTIFACTS, new Boolean(true));
+
tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);
// check the results
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Constants.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Constants.java
index f7917872..8bdf9a87 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Constants.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Constants.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test.utils;
public interface Constants {
@@ -10,8 +23,6 @@ public interface Constants {
public static final String KafkaProducerOp = "com.ibm.streamsx.kafka::KafkaProducer";
public static final String KafkaConsumerOp = "com.ibm.streamsx.kafka::KafkaConsumer";
- public static final String MessageHubConsumerOp = "com.ibm.streamsx.kafka.messagehub::MessageHubConsumer";
- public static final String MessageHubProducerOp = "com.ibm.streamsx.kafka.messagehub::MessageHubProducer";
public static final Long PRODUCER_DELAY = 5000l;
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Delay.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Delay.java
index d37f0bf1..fbe054c3 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Delay.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Delay.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test.utils;
import java.io.ObjectStreamException;
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/KafkaSPLStreamsUtils.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/KafkaSPLStreamsUtils.java
index 9023b668..592efc99 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/KafkaSPLStreamsUtils.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/KafkaSPLStreamsUtils.java
@@ -1,3 +1,16 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test.utils;
import java.util.ArrayList;
diff --git a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Message.java b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Message.java
index 754594d1..d0af1ffe 100644
--- a/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Message.java
+++ b/tests/KafkaTests/src/test/java/com/ibm/streamsx/kafka/test/utils/Message.java
@@ -1,11 +1,30 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package com.ibm.streamsx.kafka.test.utils;
import java.io.Serializable;
+/**
+ * Immutable class that holds a key and a value (message).
+ *
+ * @param <K> The key class
+ * @param <V> The message class
+ */
public class Message<K, V> implements Serializable {
private static final long serialVersionUID = 1L;
- private K key;
- private V value;
+ private final K key;
+ private final V value;
public Message(K key, V value) {
this.key = key;
@@ -19,5 +38,4 @@ public K getKey() {
public V getValue() {
return value;
}
-
}