From 24fcba465185e19ca7d86b80d9775618eaa87e02 Mon Sep 17 00:00:00 2001 From: Sriram Subramanian Date: Mon, 3 Oct 2016 16:58:20 -0700 Subject: [PATCH 01/62] "Version bump to 3.2.0-SNAPSHOT" --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 7c0f9da..f32fc69 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT kafka-connect-hdfs Confluent, Inc. @@ -50,7 +50,7 @@ - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT 0.10.1.0-SNAPSHOT 4.12 2.6.1 From c8a2a831d13e115603a300fbb7b1e58a9df53944 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Tue, 4 Oct 2016 20:24:54 -0700 Subject: [PATCH 02/62] Version bump to 0.10.2.0-SNAPSHOT --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index f32fc69..28ac390 100644 --- a/pom.xml +++ b/pom.xml @@ -51,7 +51,7 @@ 3.2.0-SNAPSHOT - 0.10.1.0-SNAPSHOT + 0.10.2.0-SNAPSHOT 4.12 2.6.1 1.2.1 From 76e40c890a60980d17b983dffaa4ec1bb5d202a8 Mon Sep 17 00:00:00 2001 From: Sriram Subramanian Date: Tue, 4 Oct 2016 22:03:28 -0700 Subject: [PATCH 03/62] fix version to 3.2 in docs --- docs/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 96288ab..7f34f96 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -57,9 +57,9 @@ def setup(app): # built documents. # # The short X.Y version. -version = '3.0' +version = '3.2' # The full version, including alpha/beta/rc tags. -release = '3.1.0-SNAPSHOT' +release = '3.2.0-SNAPSHOT' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 7b5dfa12d1b7b21884298b89ae8e55bda59e8b0a Mon Sep 17 00:00:00 2001 From: Alexis Seigneurin Date: Thu, 13 Oct 2016 14:31:34 -0400 Subject: [PATCH 04/62] fix a typo --- docs/hdfs_connector.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/hdfs_connector.rst b/docs/hdfs_connector.rst index e1947cd..d08c755 100644 --- a/docs/hdfs_connector.rst +++ b/docs/hdfs_connector.rst @@ -5,7 +5,7 @@ The HDFS connector allows you to export data from Kafka topics to HDFS files in and integrates with Hive to make data immediately available for querying with HiveQL. The connector periodically polls data from Kafka and writes them to HDFS. The data from each Kafka -topic is partitioned by the provided partitioner and divided into chucks. Each chunk of data is +topic is partitioned by the provided partitioner and divided into chunks. Each chunk of data is represented as an HDFS file with topic, kafka partition, start and end offsets of this data chuck in the filename. If no partitioner is specified in the configuration, the default partitioner which preserves the Kafka partitioning is used. The size of each data chunk is determined by the number of From b8de69ce353f757b1270e5f06f1848793a62deb7 Mon Sep 17 00:00:00 2001 From: Alexis Seigneurin Date: Thu, 13 Oct 2016 14:32:44 -0400 Subject: [PATCH 05/62] fix a typo --- docs/hdfs_connector.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/hdfs_connector.rst b/docs/hdfs_connector.rst index d08c755..220affb 100644 --- a/docs/hdfs_connector.rst +++ b/docs/hdfs_connector.rst @@ -6,7 +6,7 @@ and integrates with Hive to make data immediately available for querying with Hi The connector periodically polls data from Kafka and writes them to HDFS. The data from each Kafka topic is partitioned by the provided partitioner and divided into chunks. 
Each chunk of data is -represented as an HDFS file with topic, kafka partition, start and end offsets of this data chuck +represented as an HDFS file with topic, kafka partition, start and end offsets of this data chunk in the filename. If no partitioner is specified in the configuration, the default partitioner which preserves the Kafka partitioning is used. The size of each data chunk is determined by the number of records written to HDFS, the time written to HDFS and schema compatibility. From c058019e9e3cfe8cb0b0807f90c16b2130b69aee Mon Sep 17 00:00:00 2001 From: Hans Jespersen Date: Fri, 14 Oct 2016 10:00:16 -0700 Subject: [PATCH 06/62] fix typo --- docs/hdfs_connector.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/hdfs_connector.rst b/docs/hdfs_connector.rst index 220affb..1b77dc2 100644 --- a/docs/hdfs_connector.rst +++ b/docs/hdfs_connector.rst @@ -164,7 +164,7 @@ write Parquet format and use hourly partitioner:: format.class=io.confluent.connect.hdfs.parquet.ParquetFormat partitioner.class=io.confluent.connect.hdfs.partitioner.HourlyPartitioner -.. note:: If you want ot use the field partitioner, you need to specify the ``partition.field.name`` +.. note:: If you want to use the field partitioner, you need to specify the ``partition.field.name`` configuration as well to specify the field name of the record. Hive Integration From 0f6173ca641c80a9a8bec87c2bb6d6364b72dcb4 Mon Sep 17 00:00:00 2001 From: Shikhar Bhushan Date: Fri, 4 Nov 2016 13:27:53 -0700 Subject: [PATCH 07/62] CC-273: connect deps should be scoped as provided --- pom.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/pom.xml b/pom.xml index 28ac390..c4a7582 100644 --- a/pom.xml +++ b/pom.xml @@ -76,6 +76,7 @@ org.apache.kafka connect-api ${kafka.version} + provided io.confluent From e42429d60c91dd6f122a25f53d0fd4d2fdfacd75 Mon Sep 17 00:00:00 2001 From: Shikhar Bhushan Date: Fri, 4 Nov 2016 13:29:37 -0700 Subject: [PATCH 08/62] Switch to toEnrichedRst() for config opts docgen --- .../java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java index 0b492d7..adddc2e 100644 --- a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java +++ b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java @@ -409,6 +409,6 @@ public HdfsSinkConnectorConfig(Map props) { } public static void main(String[] args) { - System.out.println(config.toRst()); + System.out.println(config.toEnrichedRst()); } } From f68beb4c2733fab348aac73da3040f1015acfba2 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Mon, 14 Nov 2016 18:22:16 -0800 Subject: [PATCH 09/62] CC-391: Fix configuration with DefaultPartitioner --- .../java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java index adddc2e..fac3e01 100644 --- a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java +++ b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java @@ -14,7 +14,6 @@ package io.confluent.connect.hdfs; -import org.apache.kafka.clients.producer.internals.DefaultPartitioner; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; 
import org.apache.kafka.common.config.ConfigDef.Importance; @@ -28,6 +27,7 @@ import java.util.Map; import io.confluent.connect.hdfs.partitioner.DailyPartitioner; +import io.confluent.connect.hdfs.partitioner.DefaultPartitioner; import io.confluent.connect.hdfs.partitioner.FieldPartitioner; import io.confluent.connect.hdfs.partitioner.HourlyPartitioner; import io.confluent.connect.hdfs.partitioner.Partitioner; From f38410acd66c78b544609188a24d257f19120857 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Thu, 1 Dec 2016 21:48:57 -0800 Subject: [PATCH 10/62] HOTFIX: Override new method in MockSinkTaskContext. --- .../connect/hdfs/HdfsSinkConnectorTestBase.java | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java index d2c0e0e..7679ca9 100644 --- a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java +++ b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java @@ -181,13 +181,12 @@ public Set assignment() { } @Override - public void pause(TopicPartition... partitions) { - return; - } + public void pause(TopicPartition... partitions) {} @Override - public void resume(TopicPartition... partitions) { - return; - } + public void resume(TopicPartition... partitions) {} + + @Override + public void requestCommit() {} } } From e3f6641f5ad44c5d7bc1f720d0985d6256afff74 Mon Sep 17 00:00:00 2001 From: Michael Pearce Date: Sat, 14 Jan 2017 05:41:37 +0000 Subject: [PATCH 11/62] Fix topics with periods for hive #136 #137 Fix issue with topic names containing dots, not being compatible with hive (replace dot with underscore) After initial feedback/code review: Added test Removed unused imports accidentally added. 
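A minimal sketch of the table-name conversion this commit message describes; the converter added in this patch replaces each dot with an underscore because Hive table identifiers cannot contain dots. The standalone class below is hypothetical and only illustrates the mapping (the real logic lives in HiveMetaStore.tableNameConverter):

    public class TableNameConversionSketch {
      // Mirrors the converter introduced in this patch: every '.' in the
      // Kafka topic name maps to '_' in the Hive table name.
      public static String tableNameConverter(String table) {
        return table == null ? table : table.replaceAll("\\.", "_");
      }

      public static void main(String[] args) {
        // Kafka topic "topic.with.dots" is exposed to Hive as "topic_with_dots"
        System.out.println(tableNameConverter("topic.with.dots"));
      }
    }
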
--- .../connect/hdfs/avro/AvroHiveUtil.java | 2 +- .../connect/hdfs/hive/HiveMetaStore.java | 43 +++++++++-------- .../confluent/connect/hdfs/hive/HiveUtil.java | 5 ++ .../connect/hdfs/parquet/ParquetHiveUtil.java | 2 +- .../hdfs/HdfsSinkConnectorTestBase.java | 2 + .../hdfs/avro/HiveIntegrationAvroTest.java | 48 +++++++++++++++++++ 6 files changed, 81 insertions(+), 21 deletions(-) diff --git a/src/main/java/io/confluent/connect/hdfs/avro/AvroHiveUtil.java b/src/main/java/io/confluent/connect/hdfs/avro/AvroHiveUtil.java index 218bb5a..85d9de8 100644 --- a/src/main/java/io/confluent/connect/hdfs/avro/AvroHiveUtil.java +++ b/src/main/java/io/confluent/connect/hdfs/avro/AvroHiveUtil.java @@ -60,7 +60,7 @@ public void alterSchema(String database, String tableName, Schema schema) throws private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException { - Table table = new Table(database, tableName); + Table table = newTable(database, tableName); table.setTableType(TableType.EXTERNAL_TABLE); table.getParameters().put("EXTERNAL", "TRUE"); String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName); diff --git a/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java b/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java index 385278e..7d533db 100644 --- a/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java +++ b/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java @@ -94,7 +94,7 @@ public Void call() throws TException { // purposely don't check if the partition already exists because // getPartition(db, table, path) will throw an exception to indicate the // partition doesn't exist also. this way, it's only one call. - client.appendPartition(database, tableName, path); + client.appendPartition(database, tableNameConverter(tableName), path); return null; } }; @@ -104,7 +104,7 @@ public Void call() throws TException { } catch (AlreadyExistsException e) { // this is okay } catch (InvalidObjectException e) { - throw new HiveMetaStoreException("Invalid partition for " + database + "." + tableName + ": " + path, e); + throw new HiveMetaStoreException("Invalid partition for " + database + "." + tableNameConverter(tableName) + ": " + path, e); } catch (MetaException e) { throw new HiveMetaStoreException("Hive MetaStore exception", e); } catch (TException e) { @@ -116,7 +116,7 @@ public void dropPartition(final String database, final String tableName, final S ClientAction dropPartition = new ClientAction() { @Override public Void call() throws TException { - client.dropPartition(database, tableName, path, false); + client.dropPartition(database, tableNameConverter(tableName), path, false); return null; } }; @@ -126,7 +126,7 @@ public Void call() throws TException { } catch (NoSuchObjectException e) { // this is okay } catch (InvalidObjectException e) { - throw new HiveMetaStoreException("Invalid partition for " + database + "." + tableName + ": " + path, e); + throw new HiveMetaStoreException("Invalid partition for " + database + "." 
+ tableNameConverter(tableName) + ": " + path, e); } catch (MetaException e) { throw new HiveMetaStoreException("Hive MetaStore exception", e); } catch (TException e) { @@ -182,6 +182,7 @@ public void createTable(final Table table) throws HiveMetaStoreException { ClientAction create = new ClientAction() { @Override public Void call() throws TException { + client.createTable(table.getTTable()); return null; } @@ -192,7 +193,7 @@ public Void call() throws TException { try { doAction(create); } catch (NoSuchObjectException e) { - throw new HiveMetaStoreException("Hive table not found: " + table.getDbName() + "." + table.getTableName()); + throw new HiveMetaStoreException("Hive table not found: " + table.getDbName() + "." + tableNameConverter(table.getTableName())); } catch (AlreadyExistsException e) { // this is okey log.warn("Hive table already exists: {}.{}", table.getDbName(), table.getTableName()); @@ -209,7 +210,7 @@ public void alterTable(final Table table) throws HiveMetaStoreException { ClientAction alter = new ClientAction() { @Override public Void call() throws TException { - client.alter_table(table.getDbName(), table.getTableName(), table.getTTable()); + client.alter_table(table.getDbName(), tableNameConverter(table.getTableName()), table.getTTable()); return null; } }; @@ -233,7 +234,7 @@ public void dropTable(final String database, final String tableName) { ClientAction drop = new ClientAction() { @Override public Void call() throws TException { - client.dropTable(database, tableName, false, true); + client.dropTable(database, tableNameConverter(tableName), false, true); return null; } }; @@ -253,7 +254,7 @@ public boolean tableExists(final String database, final String tableName) throws ClientAction exists = new ClientAction() { @Override public Boolean call() throws TException { - return client.tableExists(database, tableName); + return client.tableExists(database, tableNameConverter(tableName)); } }; try { @@ -271,7 +272,7 @@ public Table getTable(final String database, final String tableName) throws Hive ClientAction getTable = new ClientAction
() { @Override public Table call() throws TException { - return new Table(client.getTable(database, tableName)); + return new Table(client.getTable(database, tableNameConverter(tableName))); } }; @@ -279,7 +280,7 @@ public Table call() throws TException { try { table = doAction(getTable); } catch (NoSuchObjectException e) { - throw new HiveMetaStoreException("Hive table not found: " + database + "." + tableName); + throw new HiveMetaStoreException("Hive table not found: " + database + "." + tableNameConverter(tableName)); } catch (MetaException e) { throw new HiveMetaStoreException("Hive table lookup exception", e); } catch (TException e) { @@ -287,7 +288,7 @@ public Table call() throws TException { } if (table == null) { - throw new HiveMetaStoreException("Could not find info for table: " + tableName); + throw new HiveMetaStoreException("Could not find info for table: " + tableNameConverter(tableName)); } return table; } @@ -296,7 +297,7 @@ public List listPartitions(final String database, final String tableName ClientAction> listPartitions = new ClientAction>() { @Override public List call() throws TException { - List partitions = client.listPartitions(database, tableName, max); + List partitions = client.listPartitions(database, tableNameConverter(tableName), max); List paths = new ArrayList<>(); for (Partition partition : partitions) { paths.add(partition.getSd().getLocation()); @@ -337,12 +338,12 @@ public List call() throws TException { public List getAllDatabases() throws HiveMetaStoreException { ClientAction> create = - new ClientAction>() { - @Override - public List call() throws TException { - return client.getAllDatabases(); - } - }; + new ClientAction>() { + @Override + public List call() throws TException { + return client.getAllDatabases(); + } + }; try { return doAction(create); @@ -354,4 +355,8 @@ public List call() throws TException { throw new HiveMetaStoreException("Exception communicating with the Hive MetaStore", e); } } -} + + public String tableNameConverter(String table){ + return table == null ? 
table : table.replaceAll("\\.", "_"); + } +} \ No newline at end of file diff --git a/src/main/java/io/confluent/connect/hdfs/hive/HiveUtil.java b/src/main/java/io/confluent/connect/hdfs/hive/HiveUtil.java index fa95bf7..7de26e6 100644 --- a/src/main/java/io/confluent/connect/hdfs/hive/HiveUtil.java +++ b/src/main/java/io/confluent/connect/hdfs/hive/HiveUtil.java @@ -14,6 +14,7 @@ package io.confluent.connect.hdfs.hive; +import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.kafka.connect.data.Schema; import io.confluent.connect.avro.AvroData; @@ -38,4 +39,8 @@ public HiveUtil(HdfsSinkConnectorConfig connectorConfig, AvroData avroData, Hive public abstract void createTable(String database, String tableName, Schema schema, Partitioner partitioner); public abstract void alterSchema(String database, String tableName, Schema schema); + + public Table newTable(String database, String table){ + return new Table(database, hiveMetaStore.tableNameConverter(table)); + } } diff --git a/src/main/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtil.java b/src/main/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtil.java index 0664b8c..9fa6aaa 100644 --- a/src/main/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtil.java +++ b/src/main/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtil.java @@ -55,7 +55,7 @@ public void alterSchema(String database, String tableName, Schema schema) { } private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException { - Table table = new Table(database, tableName); + Table table = newTable(database, tableName); table.setTableType(TableType.EXTERNAL_TABLE); table.getParameters().put("EXTERNAL", "TRUE"); String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName); diff --git a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java index 7679ca9..039fc1d 100644 --- a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java +++ b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java @@ -44,12 +44,14 @@ public class HdfsSinkConnectorTestBase { protected MockSinkTaskContext context; protected static final String TOPIC = "topic"; + protected static final String TOPIC_WITH_DOTS = "topic.with.dots"; protected static final int PARTITION = 12; protected static final int PARTITION2 = 13; protected static final int PARTITION3 = 14; protected static final TopicPartition TOPIC_PARTITION = new TopicPartition(TOPIC, PARTITION); protected static final TopicPartition TOPIC_PARTITION2 = new TopicPartition(TOPIC, PARTITION2); protected static final TopicPartition TOPIC_PARTITION3 = new TopicPartition(TOPIC, PARTITION3); + protected static final TopicPartition TOPIC_WITH_DOTS_PARTITION = new TopicPartition(TOPIC_WITH_DOTS, PARTITION); protected static Set assignment; diff --git a/src/test/java/io/confluent/connect/hdfs/avro/HiveIntegrationAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/HiveIntegrationAvroTest.java index 1565836..bd6a983 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/HiveIntegrationAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/HiveIntegrationAvroTest.java @@ -147,6 +147,54 @@ public void testHiveIntegrationAvro() throws Exception { assertEquals(expectedPartitions, partitions); } + @Test + public void testHiveIntegrationTopicWithDotsAvro() throws Exception { + assignment.add(TOPIC_WITH_DOTS_PARTITION); + + 
Map props = createProps(); + props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true"); + HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props); + + DataWriter hdfsWriter = new DataWriter(config, context, avroData); + hdfsWriter.recover(TOPIC_WITH_DOTS_PARTITION); + + String key = "key"; + Schema schema = createSchema(); + Struct record = createRecord(schema); + + Collection sinkRecords = new ArrayList<>(); + for (long offset = 0; offset < 7; offset++) { + SinkRecord sinkRecord = + new SinkRecord(TOPIC_WITH_DOTS, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); + + sinkRecords.add(sinkRecord); + } + + hdfsWriter.write(sinkRecords); + hdfsWriter.close(assignment); + hdfsWriter.stop(); + + Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC_WITH_DOTS); + List expectedColumnNames = new ArrayList<>(); + for (Field field: schema.fields()) { + expectedColumnNames.add(field.name()); + } + + List actualColumnNames = new ArrayList<>(); + for (FieldSchema column: table.getSd().getCols()) { + actualColumnNames.add(column.getName()); + } + assertEquals(expectedColumnNames, actualColumnNames); + + List expectedPartitions = new ArrayList<>(); + String directory = TOPIC_WITH_DOTS + "/" + "partition=" + String.valueOf(PARTITION); + expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory)); + + List partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC_WITH_DOTS, (short)-1); + + assertEquals(expectedPartitions, partitions); + } + @Test public void testHiveIntegrationFieldPartitionerAvro() throws Exception { Map props = createProps(); From fb2b0aba845e2d3d9e3e990d7be36ae9b28b7bf9 Mon Sep 17 00:00:00 2001 From: Michael Pearce Date: Sat, 14 Jan 2017 05:44:18 +0000 Subject: [PATCH 12/62] accidental un-needed format change, revert --- .../confluent/connect/hdfs/hive/HiveMetaStore.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java b/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java index 7d533db..05d8879 100644 --- a/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java +++ b/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java @@ -338,12 +338,12 @@ public List call() throws TException { public List getAllDatabases() throws HiveMetaStoreException { ClientAction> create = - new ClientAction>() { - @Override - public List call() throws TException { - return client.getAllDatabases(); - } - }; + new ClientAction>() { + @Override + public List call() throws TException { + return client.getAllDatabases(); + } + }; try { return doAction(create); From 0f754b842dfbba50adf746d710c17ffac5b22e53 Mon Sep 17 00:00:00 2001 From: Michael Pearce Date: Sat, 14 Jan 2017 06:08:33 +0000 Subject: [PATCH 13/62] remove accidental new line added --- .../java/io/confluent/connect/hdfs/hive/HiveMetaStore.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java b/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java index 05d8879..dca869a 100644 --- a/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java +++ b/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java @@ -182,7 +182,6 @@ public void createTable(final Table table) throws HiveMetaStoreException { ClientAction create = new ClientAction() { @Override public Void call() throws TException { - client.createTable(table.getTTable()); return null; } @@ -359,4 +358,4 @@ public List 
call() throws TException { public String tableNameConverter(String table){ return table == null ? table : table.replaceAll("\\.", "_"); } -} \ No newline at end of file +} From b0c48398a3aefa2c9f37bfe00c0bb52ec7a43085 Mon Sep 17 00:00:00 2001 From: Rekha Joshi Date: Thu, 19 Jan 2017 08:49:27 -0800 Subject: [PATCH 14/62] Updated Readme for Kafka connect FAQ (#75) * Update Readme * Update Readme --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 5de59d3..42e7636 100644 --- a/README.md +++ b/README.md @@ -10,13 +10,16 @@ Documentation for this connector can be found [here](http://docs.confluent.io/cu To build a development version you'll need a recent version of Kafka. You can build kafka-connect-hdfs with Maven using the standard lifecycle phases. +# FAQ + +Refer frequently asked questions on Kafka Connect HDFS here - +https://github.com/confluentinc/kafka-connect-hdfs/wiki/FAQ # Contribute - Source Code: https://github.com/confluentinc/kafka-connect-hdfs - Issue Tracker: https://github.com/confluentinc/kafka-connect-hdfs/issues - # License The project is licensed under the Apache 2 license. From 9841a9068b3a9751fd05ee817ad8b27139f4bf43 Mon Sep 17 00:00:00 2001 From: Kaufman Ng Date: Thu, 19 Jan 2017 11:53:19 -0500 Subject: [PATCH 15/62] updated avro-tools instructions in doc (#65) --- docs/hdfs_connector.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/hdfs_connector.rst b/docs/hdfs_connector.rst index 1b77dc2..2c5a0fc 100644 --- a/docs/hdfs_connector.rst +++ b/docs/hdfs_connector.rst @@ -75,7 +75,9 @@ You can use ``avro-tools-1.7.7.jar`` to extract the content of the file:: $ hadoop jar avro-tools-1.7.7.jar tojson \ - /topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro + hdfs:///topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro + +where "" is the HDFS name node hostname. You should see the following output:: From 5b21d218eb0341f580d068a2270966ffdc938cab Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Wed, 25 Jan 2017 11:08:46 -0800 Subject: [PATCH 16/62] Bump version to 3.3.0-SNAPSHOT - Automatic commit via: tbr apply-feature-freeze -e prod --no-dry-run PLATFORM --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index c4a7582..594b923 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.2.0-SNAPSHOT + 3.3.0-SNAPSHOT kafka-connect-hdfs Confluent, Inc. From cf40f3be0ad409f1f7c2bedecccd751e92e32275 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Wed, 25 Jan 2017 11:10:07 -0800 Subject: [PATCH 17/62] Bump Confluent to 3.3.0-SNAPSHOT, Kafka to 0.10.3.0-SNAPSHOT - Automatic commit via: tbr apply-feature-freeze -e prod --no-dry-run PLATFORM --- docs/conf.py | 2 +- pom.xml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 7f34f96..a738b38 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '3.2' # The full version, including alpha/beta/rc tags. -release = '3.2.0-SNAPSHOT' +release = '3.3.0-SNAPSHOT' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
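As a companion to the avro-tools ``tojson`` command shown in the documentation patch above, here is a small, hypothetical Java sketch that reads one of the connector's committed Avro files directly from HDFS and prints its records. The namenode address is a placeholder, not a value taken from this repository; the file path follows the layout described in the docs (``<topic>+<partition>+<startOffset>+<endOffset>.avro``):

    import org.apache.avro.file.DataFileReader;
    import org.apache.avro.generic.GenericDatumReader;
    import org.apache.avro.generic.GenericRecord;
    import org.apache.avro.mapred.FsInput;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class ReadCommittedAvroFile {
      public static void main(String[] args) throws Exception {
        // Placeholder namenode address; the path mirrors the example in the docs.
        Path path = new Path(
            "hdfs://namenode:8020/topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro");
        try (DataFileReader<GenericRecord> reader = new DataFileReader<>(
                 new FsInput(path, new Configuration()),
                 new GenericDatumReader<GenericRecord>())) {
          for (GenericRecord record : reader) {
            // GenericRecord#toString() renders the record as JSON, similar to avro-tools tojson
            System.out.println(record);
          }
        }
      }
    }
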
diff --git a/pom.xml b/pom.xml index 594b923..f7bfb0c 100644 --- a/pom.xml +++ b/pom.xml @@ -50,8 +50,8 @@ - 3.2.0-SNAPSHOT - 0.10.2.0-SNAPSHOT + 3.3.0-SNAPSHOT + 0.10.3.0-SNAPSHOT 4.12 2.6.1 1.2.1 From 9196b6c9d32a581f97ff6e7d5fa3b1ae10c4aa33 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Wed, 25 Jan 2017 11:12:06 -0800 Subject: [PATCH 18/62] Bump version to 3.2.0-SNAPSHOT - Automatic commit via: tbr apply-feature-freeze -e prod --no-dry-run PLATFORM From 34c90886a13678e84ac1036f39299182103f5502 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Wed, 8 Feb 2017 16:22:13 -0800 Subject: [PATCH 19/62] MINOR: Upgrade Hadoop version to 2.7.3 and joda-time to 2.9.7 --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index c4a7582..19e78af 100644 --- a/pom.xml +++ b/pom.xml @@ -53,12 +53,12 @@ 3.2.0-SNAPSHOT 0.10.2.0-SNAPSHOT 4.12 - 2.6.1 + 2.7.3 1.2.1 1.7.7 1.7.0 2.4 - 1.6.2 + 2.9.7 UTF-8 http://packages.confluent.io/maven/ From 4e357b5944a3f4482521b8564cec6a5147989ab3 Mon Sep 17 00:00:00 2001 From: Ewen Cheslack-Postava Date: Mon, 27 Feb 2017 12:51:36 -0800 Subject: [PATCH 20/62] Handle primitive types in AvroRecordWriterProvider. (#176) * Handle primitive types in AvroRecordWriterProvider. * Use named variable in test instead of repeating constants. --- .../hdfs/avro/AvroRecordWriterProvider.java | 8 +++- .../connect/hdfs/HdfsSinkTaskTest.java | 44 +++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/src/main/java/io/confluent/connect/hdfs/avro/AvroRecordWriterProvider.java b/src/main/java/io/confluent/connect/hdfs/avro/AvroRecordWriterProvider.java index f637f4b..bbf89b3 100644 --- a/src/main/java/io/confluent/connect/hdfs/avro/AvroRecordWriterProvider.java +++ b/src/main/java/io/confluent/connect/hdfs/avro/AvroRecordWriterProvider.java @@ -16,6 +16,7 @@ package io.confluent.connect.hdfs.avro; +import io.confluent.kafka.serializers.NonRecordContainer; import org.apache.avro.file.DataFileWriter; import org.apache.avro.generic.GenericDatumWriter; import org.apache.avro.io.DatumWriter; @@ -61,7 +62,12 @@ public RecordWriter getRecordWriter(Configuration conf, final String public void write(SinkRecord record) throws IOException { log.trace("Sink record: {}", record.toString()); Object value = avroData.fromConnectData(schema, record.value()); - writer.append(value); + // AvroData wraps primitive types so their schema can be included. 
We need to unwrap NonRecordContainers to just + // their value to properly handle these types + if (value instanceof NonRecordContainer) + writer.append(((NonRecordContainer) value).getValue()); + else + writer.append(value); } @Override diff --git a/src/test/java/io/confluent/connect/hdfs/HdfsSinkTaskTest.java b/src/test/java/io/confluent/connect/hdfs/HdfsSinkTaskTest.java index 8a028ae..746d63d 100644 --- a/src/test/java/io/confluent/connect/hdfs/HdfsSinkTaskTest.java +++ b/src/test/java/io/confluent/connect/hdfs/HdfsSinkTaskTest.java @@ -14,6 +14,7 @@ package io.confluent.connect.hdfs; +import io.confluent.kafka.serializers.NonRecordContainer; import org.apache.hadoop.fs.Path; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.connect.data.Schema; @@ -179,6 +180,49 @@ public void testSinkTaskPut() throws Exception { } } + @Test + public void testSinkTaskPutPrimitive() throws Exception { + Map props = createProps(); + HdfsSinkTask task = new HdfsSinkTask(); + + final String key = "key"; + final Schema schema = Schema.INT32_SCHEMA; + final int record = 12; + Collection sinkRecords = new ArrayList<>(); + for (TopicPartition tp: assignment) { + for (long offset = 0; offset < 7; offset++) { + SinkRecord sinkRecord = + new SinkRecord(tp.topic(), tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset); + sinkRecords.add(sinkRecord); + } + } + task.initialize(context); + task.start(props); + task.put(sinkRecords); + task.stop(); + + AvroData avroData = task.getAvroData(); + // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close + long[] validOffsets = {-1, 2, 5}; + + for (TopicPartition tp : assignment) { + String directory = tp.topic() + "/" + "partition=" + String.valueOf(tp.partition()); + for (int j = 1; j < validOffsets.length; ++j) { + long startOffset = validOffsets[j - 1] + 1; + long endOffset = validOffsets[j]; + Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory, tp, + startOffset, endOffset, extension, + ZERO_PAD_FMT)); + Collection records = schemaFileReader.readData(conf, path); + long size = endOffset - startOffset + 1; + assertEquals(records.size(), size); + for (Object avroRecord : records) { + assertEquals(avroRecord, record); + } + } + } + } + private void createCommittedFiles() throws IOException { String file1 = FileUtils.committedFileName(url, topicsDir, DIRECTORY1, TOPIC_PARTITION, 0, 10, extension, ZERO_PAD_FMT); From 535be4702a400bb3e7ef189667849d37b11c174e Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Mon, 27 Feb 2017 13:53:02 -0800 Subject: [PATCH 21/62] Set Confluent to 3.2.0, Kafka to 0.10.2.0-cp1. - Automatic commit via: tbr apply-rc-tag --rc 6 --confluent-patch 1 --no-dry-run -e prod kafka-connect-hdfs --- docs/conf.py | 2 +- pom.xml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 7f34f96..9dabdac 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '3.2' # The full version, including alpha/beta/rc tags. -release = '3.2.0-SNAPSHOT' +release = '3.2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pom.xml b/pom.xml index 19e78af..c388101 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.2.0-SNAPSHOT + 3.2.0 kafka-connect-hdfs Confluent, Inc. 
@@ -50,8 +50,8 @@ - 3.2.0-SNAPSHOT - 0.10.2.0-SNAPSHOT + 3.2.0 + 0.10.2.0-cp1 4.12 2.7.3 1.2.1 From da94c38eb17961c390006669b3312c6b8e722739 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Wed, 1 Mar 2017 18:00:54 -0800 Subject: [PATCH 22/62] Bump version to 3.2.1-SNAPSHOT - Automatic commit via: pint set-version --branch 3.2.x --version 3.2.1-SNAPSHOT --no-validate-version -e prod camus PLATFORM --no-dry-run --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 19e78af..055316b 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.2.0-SNAPSHOT + 3.2.1-SNAPSHOT kafka-connect-hdfs Confluent, Inc. From 58f6c8521dbd313f4e0f1d9e156c51d4e117f923 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Wed, 1 Mar 2017 18:02:45 -0800 Subject: [PATCH 23/62] Bump Confluent to 3.2.1-SNAPSHOT - Automatic commit via: pint bump-deps --branch 3.2.x --confluent-version 3.2.1-SNAPSHOT -e prod camus PLATFORM --no-dry-run --- docs/conf.py | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 7f34f96..1fd3db8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '3.2' # The full version, including alpha/beta/rc tags. -release = '3.2.0-SNAPSHOT' +release = '3.2.1-SNAPSHOT' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pom.xml b/pom.xml index 055316b..64df5df 100644 --- a/pom.xml +++ b/pom.xml @@ -50,7 +50,7 @@ - 3.2.0-SNAPSHOT + 3.2.1-SNAPSHOT 0.10.2.0-SNAPSHOT 4.12 2.7.3 From 8c0596bbe877a0f72016d46eaa8a8cda97bb6014 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Wed, 1 Mar 2017 18:11:01 -0800 Subject: [PATCH 24/62] Bump Kafka to 0.10.2.1-SNAPSHOT - Automatic commit via: pint bump-deps --branch 3.2.x --kafka-version 0.10.2.1-SNAPSHOT -e prod camus PLATFORM --no-dry-run --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 64df5df..79e3c39 100644 --- a/pom.xml +++ b/pom.xml @@ -51,7 +51,7 @@ 3.2.1-SNAPSHOT - 0.10.2.0-SNAPSHOT + 0.10.2.1-SNAPSHOT 4.12 2.7.3 1.2.1 From 9afe7130b1b24dc55112275c658438e861c6132b Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Fri, 3 Mar 2017 17:05:15 -0800 Subject: [PATCH 25/62] Bump Kafka to 0.11.0.0-SNAPSHOT - Automatic commit via: pint bump-deps --kafka-version 0.11.0.0-SNAPSHOT --branch master PLATFORM -e prod --no-dry-run --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index fab4caa..5af1775 100644 --- a/pom.xml +++ b/pom.xml @@ -51,7 +51,7 @@ 3.3.0-SNAPSHOT - 0.10.3.0-SNAPSHOT + 0.11.0.0-SNAPSHOT 4.12 2.7.3 1.2.1 From ef68aa686583e9a1bcc5a2feea1959e98d381790 Mon Sep 17 00:00:00 2001 From: Ewen Cheslack-Postava Date: Mon, 13 Mar 2017 15:23:34 -0700 Subject: [PATCH 26/62] CC-437: Update 3.2.0 changelog --- docs/changelog.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 64a88b5..be5649f 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -3,6 +3,12 @@ Changelog ========= +Version 3.2.0 +------------- + +* `PR-135 `_ - Fix typos +* `PR-164 `_ - Issue 136 - Support topic with dots in hive. + Version 3.1.1 ------------- No changes From 241eb77eef58866aaa4967a7b4b7bbe0e1b61c16 Mon Sep 17 00:00:00 2001 From: Ewen Cheslack-Postava Date: Wed, 15 Mar 2017 14:54:37 -0700 Subject: [PATCH 27/62] Add missing PR-170 to release notes. 
--- docs/changelog.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index be5649f..ab2ed35 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -8,6 +8,7 @@ Version 3.2.0 * `PR-135 `_ - Fix typos * `PR-164 `_ - Issue 136 - Support topic with dots in hive. +* `PR-170 `_ - MINOR: Upgrade Hadoop version to 2.7.3 and joda-time to 2.9.7 Version 3.1.1 ------------- From 449ef58e52124bcd0597452a16fe468c82a76330 Mon Sep 17 00:00:00 2001 From: Ewen Cheslack-Postava Date: Tue, 11 Apr 2017 13:32:33 -0700 Subject: [PATCH 28/62] Add 3.2.1 changelog. --- docs/changelog.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index ab2ed35..3447d44 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -3,6 +3,10 @@ Changelog ========= +Version 3.2.1 +------------- +No changes + Version 3.2.0 ------------- From 8b439a0af36347e62412a7ac8f34ba423f8a1bf6 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Mon, 1 May 2017 14:34:51 -0700 Subject: [PATCH 29/62] Set Confluent to 3.2.1, Kafka to 0.10.2.1-cp1. - Automatic commit via: tbr apply-rc-tag --rc 5 --confluent-patch 1 -e prod --no-dry-run PLATFORM --- docs/conf.py | 2 +- pom.xml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 1fd3db8..e438e23 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '3.2' # The full version, including alpha/beta/rc tags. -release = '3.2.1-SNAPSHOT' +release = '3.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pom.xml b/pom.xml index 79e3c39..9069f65 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.2.1-SNAPSHOT + 3.2.1 kafka-connect-hdfs Confluent, Inc. @@ -50,8 +50,8 @@ - 3.2.1-SNAPSHOT - 0.10.2.1-SNAPSHOT + 3.2.1 + 0.10.2.1-cp1 4.12 2.7.3 1.2.1 From 64598ed6e3157c50c1c720f0196e2930c9d49136 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Thu, 4 May 2017 11:04:32 -0700 Subject: [PATCH 30/62] Bump version to 3.2.2-SNAPSHOT - Automatic commit via: pint set-version --branch 3.2.x --version 3.2.2-SNAPSHOT --no-validate-version -e prod PLATFORM --no-dry-run --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 79e3c39..3e7ff8c 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.2.1-SNAPSHOT + 3.2.2-SNAPSHOT kafka-connect-hdfs Confluent, Inc. From cee729bf4367c96c60f1cc0bddb47e8262a10cee Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Thu, 4 May 2017 11:08:18 -0700 Subject: [PATCH 31/62] Bump Confluent to 3.2.2-SNAPSHOT, Kafka to 0.10.2.2-SNAPSHOT - Automatic commit via: pint bump-deps --branch 3.2.x --confluent-version 3.2.2-SNAPSHOT --kafka-version 0.10.2.2-SNAPSHOT -e prod PLATFORM --no-dry-run --- docs/conf.py | 2 +- pom.xml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 1fd3db8..a63635a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '3.2' # The full version, including alpha/beta/rc tags. -release = '3.2.1-SNAPSHOT' +release = '3.2.2-SNAPSHOT' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pom.xml b/pom.xml index 3e7ff8c..2b2ffd1 100644 --- a/pom.xml +++ b/pom.xml @@ -50,8 +50,8 @@ - 3.2.1-SNAPSHOT - 0.10.2.1-SNAPSHOT + 3.2.2-SNAPSHOT + 0.10.2.2-SNAPSHOT 4.12 2.7.3 1.2.1 From e7862480b3b34cfd6445dbf50a9be9109942c317 Mon Sep 17 00:00:00 2001 From: Ewen Cheslack-Postava Date: Thu, 18 May 2017 17:11:33 -0700 Subject: [PATCH 32/62] Fix HdfsSinkConnector to extend from SinkConnector instead of Connector. --- .../java/io/confluent/connect/hdfs/HdfsSinkConnector.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnector.java b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnector.java index 8b365f0..909928b 100644 --- a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnector.java +++ b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnector.java @@ -17,9 +17,9 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.connect.connector.Connector; import org.apache.kafka.connect.connector.Task; import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.sink.SinkConnector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,7 +31,7 @@ /** * HdfsSinkConnector is a Kafka Connect Connector implementation that ingest data from Kafka to HDFS. */ -public class HdfsSinkConnector extends Connector { +public class HdfsSinkConnector extends SinkConnector { private static final Logger log = LoggerFactory.getLogger(HdfsSinkConnector.class); private Map configProperties; From cc376111eaceb48f20a64cdd4f50aab899c37b39 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Thu, 20 Apr 2017 14:25:39 -0700 Subject: [PATCH 33/62] CC-491: Consolidate and simplify unit tests of HDFS connector --- .../connect/hdfs/avro/DataWriterAvroTest.java | 619 ++++++++---------- 1 file changed, 277 insertions(+), 342 deletions(-) diff --git a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java index c22f2f2..1e3d26b 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java @@ -35,15 +35,20 @@ import io.confluent.connect.hdfs.storage.StorageFactory; import io.confluent.connect.hdfs.wal.WAL; +import java.io.FileNotFoundException; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -52,47 +57,23 @@ public class DataWriterAvroTest extends TestWithMiniDFSCluster { private static final String extension = ".avro"; private static final String ZERO_PAD_FMT = "%010d"; private SchemaFileReader schemaFileReader = new AvroFileReader(avroData); - + Partitioner partitioner; + @Test public void testWriteRecord() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); - Partitioner partitioner = hdfsWriter.getPartitioner(); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = 
createRecord(schema); + List sinkRecords = createRecords(7); - Collection sinkRecords = new ArrayList<>(); - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); - - sinkRecords.add(sinkRecord); - } hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - String encodedPartition = "partition=" + String.valueOf(PARTITION); - String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition); - // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close - long[] validOffsets = {-1, 2, 5}; - for (int i = 1; i < validOffsets.length; i++) { - long startOffset = validOffsets[i - 1] + 1; - long endOffset = validOffsets[i]; - Path path = - new Path(FileUtils - .committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffset, - endOffset, extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(size, records.size()); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } + long[] validOffsets = {0, 3, 6}; + verify(sinkRecords, validOffsets); } @Test @@ -103,99 +84,58 @@ public void testRecovery() throws Exception { Class storageClass = (Class) Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG)); Storage storage = StorageFactory.createStorage(storageClass, conf, url); + DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); WAL wal = storage.wal(logsDir, TOPIC_PARTITION); wal.append(WAL.beginMarker, ""); - Set committedFiles = new HashSet<>(); - - String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); for (int i = 0; i < 5; ++i) { long startOffset = i * 10; long endOffset = (i + 1) * 10 - 1; - String tempfile = FileUtils.tempFileName(url, topicsDir, directory, extension); + String tempfile = FileUtils.tempFileName(url, topicsDir, getDirectory(), extension); fs.createNewFile(new Path(tempfile)); - String committedFile = FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffset, + String committedFile = FileUtils.committedFileName(url, topicsDir, getDirectory(), TOPIC_PARTITION, startOffset, endOffset, extension, ZERO_PAD_FMT); - committedFiles.add(committedFile); wal.append(tempfile, committedFile); } wal.append(WAL.endMarker, ""); wal.close(); - DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); hdfsWriter.recover(TOPIC_PARTITION); Map offsets = context.offsets(); assertTrue(offsets.containsKey(TOPIC_PARTITION)); assertEquals(50L, (long) offsets.get(TOPIC_PARTITION)); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - // Need enough records to trigger file rotation - ArrayList sinkRecords = new ArrayList<>(); - for (int i = 0; i < 3; i++) - sinkRecords.add( - new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 50 + i)); + List sinkRecords = createRecords(3, 50); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - committedFiles.add(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, - 50, 52, extension, ZERO_PAD_FMT)); - FileStatus[] statuses = fs.listStatus(new Path(FileUtils.directoryName(url, topicsDir, directory)), - new TopicPartitionCommittedFileFilter(TOPIC_PARTITION)); - 
assertEquals(committedFiles.size(), statuses.length); - for (FileStatus status : statuses) { - assertTrue(committedFiles.contains(status.getPath().toString())); - } + long[] validOffsets = {0, 10, 20, 30, 40, 50, 53}; + verifyFileListing(validOffsets, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); } @Test public void testWriteRecordMultiplePartitions() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); for (TopicPartition tp: assignment) { hdfsWriter.recover(tp); } - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - Collection sinkRecords = new ArrayList<>(); - for (TopicPartition tp: assignment) { - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(tp.topic(), tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } - } + List sinkRecords = createRecords(7, 0, assignment); + hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close - long[] validOffsets = {-1, 2, 5}; - - for (TopicPartition tp : assignment) { - String directory = tp.topic() + "/" + "partition=" + String.valueOf(tp.partition()); - for (int j = 1; j < validOffsets.length; ++j) { - long startOffset = validOffsets[j - 1] + 1; - long endOffset = validOffsets[j]; - Path path = new Path( - FileUtils.committedFileName(url, topicsDir, directory, tp, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(records.size(), size); - for (Object avroRecord : records) { - assertEquals(avroRecord, avroData.fromConnectData(schema, record)); - } - } - } + long[] validOffsets = {0, 3, 6}; + verify(sinkRecords, validOffsets, assignment); } @Test @@ -229,65 +169,33 @@ public void testGetPreviousOffsets() throws Exception { } @Test - public void testWriteRecordNonZeroInitailOffset() throws Exception { + public void testWriteRecordNonZeroInitialOffset() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); - Partitioner partitioner = hdfsWriter.getPartitioner(); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Collection sinkRecords = new ArrayList<>(); - for (long offset = 3; offset < 10; offset++) { - SinkRecord sinkRecord = - new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } + List sinkRecords = createRecords(7, 3); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - String directory = partitioner.generatePartitionedPath(TOPIC, "partition=" + String.valueOf(PARTITION)); - // Last file (offset 9) doesn't satisfy size requirement and gets discarded on close - long[] validOffsets = {2, 5, 8}; - for (int i = 1; i < validOffsets.length; i++) { - long startOffset = validOffsets[i - 1] + 1; - long endOffset = validOffsets[i]; - Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory, - TOPIC_PARTITION, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(size, records.size()); - for (Object 
avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } + long[] validOffsets = {3, 6, 9}; + verify(sinkRecords, validOffsets); } @Test public void testRebalance() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); // Initial assignment is {TP1, TP2} for (TopicPartition tp: assignment) { hdfsWriter.recover(tp); } - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - Collection sinkRecords = new ArrayList<>(); - for (TopicPartition tp: assignment) { - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(tp.topic(), tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } - } + List sinkRecords = createRecords(7, 0, assignment); hdfsWriter.write(sinkRecords); Set oldAssignment = new HashSet<>(assignment); @@ -305,72 +213,26 @@ public void testRebalance() throws Exception { assertNotNull(hdfsWriter.getBucketWriter(TOPIC_PARTITION3)); // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close - long[] validOffsetsTopicPartition2 = {-1, 2, 5}; - String directory2 = TOPIC + "/" + "partition=" + String.valueOf(PARTITION2); - for (int j = 1; j < validOffsetsTopicPartition2.length; ++j) { - long startOffset = validOffsetsTopicPartition2[j - 1] + 1; - long endOffset = validOffsetsTopicPartition2[j]; - Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory2, - TOPIC_PARTITION2, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(records.size(), size); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } + long[] validOffsetsTopicPartition2 = {0, 3, 6}; + verify(sinkRecords, validOffsetsTopicPartition2, Collections.singleton(TOPIC_PARTITION2), true); - sinkRecords.clear(); - for (TopicPartition tp: assignment) { - // Message offsets start at 6 because we discarded the in-progress temp file on rebalance - for (long offset = 6; offset < 10; offset++) { - SinkRecord sinkRecord = - new SinkRecord(tp.topic(), tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } - } + // Message offsets start at 6 because we discarded the in-progress temp file on re-balance + sinkRecords = createRecords(3, 6, assignment); hdfsWriter.write(sinkRecords); hdfsWriter.close(newAssignment); hdfsWriter.stop(); // Last file (offset 9) doesn't satisfy size requirement and gets discarded on close - long[] validOffsetsTopicPartition1 = {5, 8}; - String directory1 = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); - for (int j = 1; j < validOffsetsTopicPartition1.length; ++j) { - long startOffset = validOffsetsTopicPartition1[j - 1] + 1; - long endOffset = validOffsetsTopicPartition1[j]; - Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory1 , - TOPIC_PARTITION, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(records.size(), size); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } + long[] validOffsetsTopicPartition1 = {6, 9}; + verify(sinkRecords, validOffsetsTopicPartition1, 
Collections.singleton(TOPIC_PARTITION), true); + + long[] validOffsetsTopicPartition3 = {6, 9}; + verify(sinkRecords, validOffsetsTopicPartition3, Collections.singleton(TOPIC_PARTITION3), true); - long[] validOffsetsTopicPartition3 = {5, 8}; - String directory3 = TOPIC + "/" + "partition=" + String.valueOf(PARTITION3); - for (int j = 1; j < validOffsetsTopicPartition3.length; ++j) { - long startOffset = validOffsetsTopicPartition3[j - 1] + 1; - long endOffset = validOffsetsTopicPartition3[j]; - Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory3, - TOPIC_PARTITION3, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(records.size(), size); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } assignment = oldAssignment; } - @Test public void testProjectBackWard() throws Exception { Map props = createProps(); @@ -379,53 +241,16 @@ public void testProjectBackWard() throws Exception { HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Schema newSchema = createNewSchema(); - Struct newRecord = createNewRecord(newSchema); - - Collection sinkRecords = new ArrayList<>(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 0L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 1L)); + List sinkRecords = createRecordsWithAlteringSchemas(7, 0); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - - String DIRECTORY = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); - Path path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, - 0L, 1L, extension, ZERO_PAD_FMT)); - ArrayList records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(2, records.size()); - - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(0)); - - Object projected = SchemaProjector.project(schema, record, newSchema); - assertEquals(avroData.fromConnectData(newSchema, projected), records.get(1)); - - hdfsWriter = new DataWriter(connectorConfig, context, avroData); - hdfsWriter.recover(TOPIC_PARTITION); - - sinkRecords.clear(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 2L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, - newRecord, 3L)); - - hdfsWriter.write(sinkRecords); - hdfsWriter.close(assignment); - hdfsWriter.stop(); - - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 2L, - 3L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(2, records.size()); - - assertEquals(avroData.fromConnectData(newSchema, projected), records.get(0)); - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(1)); + long[] validOffsets = {0, 1, 3, 5, 7}; + verify(sinkRecords, validOffsets); } @Test @@ -435,58 +260,17 @@ public void testProjectNone() throws Exception { HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = 
new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Schema newSchema = createNewSchema(); - Struct newRecord = createNewRecord(newSchema); - - Collection sinkRecords = new ArrayList<>(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 0L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 1L)); - // Include one more to get to forced file rotation - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 2L)); - - hdfsWriter.write(sinkRecords); - hdfsWriter.close(assignment); - hdfsWriter.stop(); - - String DIRECTORY = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); - Path path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, - 0L, 0L, extension, ZERO_PAD_FMT)); - ArrayList records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(1, records.size()); - - - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(0)); - - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 1L, - 2L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(avroData.fromConnectData(schema, record), records.get(0)); - assertEquals(avroData.fromConnectData(schema, record), records.get(1)); - - hdfsWriter = new DataWriter(connectorConfig, context, avroData); - hdfsWriter.recover(TOPIC_PARTITION); - - sinkRecords.clear(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 3L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 4L)); + List sinkRecords = createRecordsWithAlteringSchemas(7, 0); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 3L, - 4L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(2, records.size()); - - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(0)); - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(1)); + long[] validOffsets = {0, 1, 2, 3, 4, 5, 6}; + verify(sinkRecords, validOffsets); } @Test @@ -497,99 +281,44 @@ public void testProjectForward() throws Exception { HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Schema newSchema = createNewSchema(); - Struct newRecord = createNewRecord(newSchema); - - Collection sinkRecords = new ArrayList<>(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 0L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 1L)); - // Include one more to get to forced file rotation - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 2L)); - - hdfsWriter.write(sinkRecords); - hdfsWriter.close(assignment); - hdfsWriter.stop(); - - String DIRECTORY = TOPIC + "/" + 
"partition=" + String.valueOf(PARTITION); - Path path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, - 0L, 0L, extension, ZERO_PAD_FMT)); - ArrayList records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(1, records.size()); - - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(0)); - - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 1L, - 2L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(avroData.fromConnectData(schema, record), records.get(0)); - assertEquals(avroData.fromConnectData(schema, record), records.get(1)); - - hdfsWriter = new DataWriter(connectorConfig, context, avroData); - hdfsWriter.recover(TOPIC_PARTITION); - - sinkRecords.clear(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 3L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 4L)); + // By excluding the first element we get a list starting with record having the new schema. + List sinkRecords = createRecordsWithAlteringSchemas(8, 0).subList(1, 8); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 3L, - 4L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(2, records.size()); - - assertEquals(avroData.fromConnectData(schema, record), records.get(0)); - assertEquals(avroData.fromConnectData(schema, record), records.get(1)); + long[] validOffsets = {1, 2, 4, 6, 8}; + verify(sinkRecords, validOffsets); } @Test public void testProjectNoVersion() throws Exception { - Schema schemaNoVersion = SchemaBuilder.struct().name("record") - .field("boolean", Schema.BOOLEAN_SCHEMA) - .field("int", Schema.INT32_SCHEMA) - .field("long", Schema.INT64_SCHEMA) - .field("float", Schema.FLOAT32_SCHEMA) - .field("double", Schema.FLOAT64_SCHEMA) - .build(); - - Struct recordNoVersion = new Struct(schemaNoVersion); - recordNoVersion.put("boolean", true) - .put("int", 12) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - Map props = createProps(); props.put(HdfsSinkConnectorConfig.SCHEMA_COMPATIBILITY_CONFIG, "BACKWARD"); HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Collection sinkRecords = new ArrayList<>(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schemaNoVersion, recordNoVersion, 0L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 1L)); + List sinkRecords = createRecordsNoVersion(1, 0); + sinkRecords.addAll(createRecordsWithAlteringSchemas(7, 0)); try { hdfsWriter.write(sinkRecords); fail("Version is required for Backward compatibility."); } catch (RuntimeException e) { // expected + } finally { + hdfsWriter.close(assignment); + hdfsWriter.stop(); + long[] validOffsets = {}; + verify(Collections.emptyList(), validOffsets); } - hdfsWriter.close(assignment); - hdfsWriter.stop(); } @Test @@ -600,7 +329,7 @@ public void testFlushPartialFile() throws Exception { String FLUSH_SIZE_CONFIG = "10"; // send 1.5 * 
FLUSH_SIZE_CONFIG records - long NUMBER_OF_RECORD = Long.valueOf(FLUSH_SIZE_CONFIG) + Long.valueOf(FLUSH_SIZE_CONFIG) / 2; + int NUMBER_OF_RECORDS = Integer.valueOf(FLUSH_SIZE_CONFIG) + Integer.valueOf(FLUSH_SIZE_CONFIG) / 2; Map props = createProps(); props.put(HdfsSinkConnectorConfig.FLUSH_SIZE_CONFIG, FLUSH_SIZE_CONFIG); @@ -610,17 +339,10 @@ public void testFlushPartialFile() throws Exception { assignment.add(TOPIC_PARTITION); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Collection sinkRecords = new ArrayList<>(); - for (long offset = 0; offset < NUMBER_OF_RECORD; offset++) { - SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } + List sinkRecords = createRecords(NUMBER_OF_RECORDS); hdfsWriter.write(sinkRecords); // wait for rotation to happen @@ -634,11 +356,224 @@ public void testFlushPartialFile() throws Exception { Map committedOffsets = hdfsWriter.getCommittedOffsets(); assertTrue(committedOffsets.containsKey(TOPIC_PARTITION)); long previousOffset = committedOffsets.get(TOPIC_PARTITION); - assertEquals(NUMBER_OF_RECORD, previousOffset); + assertEquals(NUMBER_OF_RECORDS, previousOffset); hdfsWriter.close(assignment); hdfsWriter.stop(); } + /** + * Return a list of new records starting at zero offset. + * + * @param size the number of records to return. + * @return the list of records. + */ + protected List createRecords(int size) { + return createRecords(size, 0); + } + + /** + * Return a list of new records starting at the given offset. + * + * @param size the number of records to return. + * @param startOffset the starting offset. + * @return the list of records. + */ + protected List createRecords(int size, long startOffset) { + return createRecords(size, startOffset, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); + } + + /** + * Return a list of new records for a set of partitions, starting at the given offset in each partition. + * + * @param size the number of records to return. + * @param startOffset the starting offset. + * @param partitions the set of partitions to create records for. + * @return the list of records. 
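+   * Every partition in the set receives the same sequence of records, so the returned list contains size * partitions.size() entries.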
+ */ + protected List createRecords(int size, long startOffset, Set partitions) { + String key = "key"; + Schema schema = createSchema(); + Struct record = createRecord(schema); + + List sinkRecords = new ArrayList<>(); + for (TopicPartition tp : partitions) { + for (long offset = startOffset; offset < startOffset + size; ++offset) { + sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset)); + } + } + return sinkRecords; + } + + protected List createRecordsNoVersion(int size, long startOffset) { + String key = "key"; + Schema schemaNoVersion = SchemaBuilder.struct().name("record") + .field("boolean", Schema.BOOLEAN_SCHEMA) + .field("int", Schema.INT32_SCHEMA) + .field("long", Schema.INT64_SCHEMA) + .field("float", Schema.FLOAT32_SCHEMA) + .field("double", Schema.FLOAT64_SCHEMA) + .build(); + + Struct recordNoVersion = new Struct(schemaNoVersion); + recordNoVersion.put("boolean", true) + .put("int", 12) + .put("long", 12L) + .put("float", 12.2f) + .put("double", 12.2); + + List sinkRecords = new ArrayList<>(); + for (long offset = startOffset; offset < startOffset + size; ++offset) { + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schemaNoVersion, + recordNoVersion, offset)); + } + return sinkRecords; + } + + protected List createRecordsWithAlteringSchemas(int size, long startOffset) { + String key = "key"; + Schema schema = createSchema(); + Struct record = createRecord(schema); + Schema newSchema = createNewSchema(); + Struct newRecord = createNewRecord(newSchema); + + int limit = (size / 2) * 2; + boolean remainder = size % 2 > 0; + List sinkRecords = new ArrayList<>(); + for (long offset = startOffset; offset < startOffset + limit; ++offset) { + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset)); + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, ++offset)); + } + if (remainder) { + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, + startOffset + size - 1)); + } + return sinkRecords; + } + + protected List createRecordsInterleaved(int size, long startOffset, Set partitions) { + String key = "key"; + Schema schema = createSchema(); + Struct record = createRecord(schema); + + List sinkRecords = new ArrayList<>(); + for (long offset = startOffset, total = 0; total < size; ++offset) { + for (TopicPartition tp : partitions) { + sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset)); + if (++total >= size) { + break; + } + } + } + return sinkRecords; + } + + protected String getDirectory() { + return getDirectory(TOPIC, PARTITION); + } + + protected String getDirectory(String topic, int partition) { + String encodedPartition = "partition=" + String.valueOf(partition); + return partitioner.generatePartitionedPath(topic, encodedPartition); + } + + /** + * Verify files and records are uploaded appropriately. + * @param sinkRecords a flat list of the records that need to appear in potentially several files in S3. + * @param validOffsets an array containing the offsets that map to uploaded files for a topic-partition. + * Offsets appear in ascending order, the difference between two consecutive offsets + * equals the expected size of the file, and last offset is exclusive. 
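+   *                     For example, {0, 3, 6} describes two committed files: one covering offsets 0-2 and one covering offsets 3-5.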
+ */ + protected void verify(List sinkRecords, long[] validOffsets) throws IOException { + verify(sinkRecords, validOffsets, Collections.singleton(new TopicPartition(TOPIC, PARTITION)), false); + } + + protected void verify(List sinkRecords, long[] validOffsets, Set partitions) + throws IOException { + verify(sinkRecords, validOffsets, partitions, false); + } + + /** + * Verify files and records are uploaded appropriately. + * @param sinkRecords a flat list of the records that need to appear in potentially several files in S3. + * @param validOffsets an array containing the offsets that map to uploaded files for a topic-partition. + * Offsets appear in ascending order, the difference between two consecutive offsets + * equals the expected size of the file, and last offset is exclusive. + * @param partitions the set of partitions to verify records for. + */ + protected void verify(List sinkRecords, long[] validOffsets, Set partitions, + boolean skipFileListing) throws IOException { + if (!skipFileListing) { + verifyFileListing(validOffsets, partitions); + } + + for (TopicPartition tp : partitions) { + for (int i = 1, j = 0; i < validOffsets.length; ++i) { + long startOffset = validOffsets[i - 1]; + long endOffset = validOffsets[i] - 1; + + String filename = FileUtils.committedFileName(url, topicsDir, getDirectory(tp.topic(), tp.partition()), tp, + startOffset, endOffset, extension, ZERO_PAD_FMT); + Path path = new Path(filename); + Collection records = schemaFileReader.readData(conf, path); + + long size = endOffset - startOffset + 1; + assertEquals(size, records.size()); + verifyContents(sinkRecords, j, records); + j += size; + } + } + } + + protected List getExpectedFiles(long[] validOffsets, TopicPartition tp) { + List expectedFiles = new ArrayList<>(); + for (int i = 1; i < validOffsets.length; ++i) { + long startOffset = validOffsets[i - 1]; + long endOffset = validOffsets[i] - 1; + expectedFiles.add(FileUtils.committedFileName(url, topicsDir, getDirectory(tp.topic(), tp.partition()), tp, + startOffset, endOffset, extension, ZERO_PAD_FMT)); + } + return expectedFiles; + } + + protected void verifyFileListing(long[] validOffsets, Set partitions) throws IOException { + for (TopicPartition tp : partitions) { + verifyFileListing(getExpectedFiles(validOffsets, tp), tp); + } + } + + protected void verifyFileListing(List expectedFiles, TopicPartition tp) throws IOException { + FileStatus[] statuses = {}; + try { + statuses = fs.listStatus( + new Path(FileUtils.directoryName(url, topicsDir, getDirectory(tp.topic(), tp.partition()))), + new TopicPartitionCommittedFileFilter(tp)); + } catch (FileNotFoundException e) { + // the directory does not exist. 
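+      // No committed files were found for this partition; statuses stays empty and is compared against expectedFiles below.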
+ } + + List actualFiles = new ArrayList<>(); + for (FileStatus status : statuses) { + actualFiles.add(status.getPath().toString()); + } + + Collections.sort(actualFiles); + Collections.sort(expectedFiles); + assertThat(actualFiles, is(expectedFiles)); + } + + protected void verifyContents(List expectedRecords, int startIndex, Collection records) { + Schema expectedSchema = null; + for (Object avroRecord : records) { + if (expectedSchema == null) { + expectedSchema = expectedRecords.get(startIndex).valueSchema(); + } + Object expectedValue = SchemaProjector.project(expectedRecords.get(startIndex).valueSchema(), + expectedRecords.get(startIndex++).value(), + expectedSchema); + assertEquals(avroData.fromConnectData(expectedSchema, expectedValue), avroRecord); + } + } + } From 02d1bb764860002a335264b68021fd8ab4cf3a2e Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Thu, 20 Apr 2017 15:46:44 -0700 Subject: [PATCH 34/62] Move verification methods and members to TestWithMiniDFSCluster --- .../connect/hdfs/TestWithMiniDFSCluster.java | 241 ++++++++++++++++ .../connect/hdfs/avro/DataWriterAvroTest.java | 257 ++---------------- .../hdfs/avro/TopicPartitionWriterTest.java | 17 +- .../hdfs/parquet/DataWriterParquetTest.java | 53 +--- 4 files changed, 277 insertions(+), 291 deletions(-) diff --git a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java index 6e29a4f..efc92ea 100644 --- a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java +++ b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java @@ -17,18 +17,44 @@ package io.confluent.connect.hdfs; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.connect.data.SchemaProjector; +import org.apache.kafka.connect.data.Struct; +import org.apache.kafka.connect.sink.SinkRecord; import org.junit.After; import org.junit.Before; +import java.io.FileNotFoundException; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.Set; + +import io.confluent.connect.hdfs.filter.TopicPartitionCommittedFileFilter; +import io.confluent.connect.hdfs.partitioner.Partitioner; + +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; public class TestWithMiniDFSCluster extends HdfsSinkConnectorTestBase { protected MiniDFSCluster cluster; protected FileSystem fs; + protected SchemaFileReader schemaFileReader; + protected Partitioner partitioner; + protected String extension; + // The default based on default configuration of 10 + protected String zeroPadFormat = "%010d"; @Before public void setUp() throws Exception { @@ -69,4 +95,219 @@ protected Map createProps() { props.put(HdfsSinkConnectorConfig.HDFS_URL_CONFIG, url); return props; } + + + /** + * Return a list of new records starting at zero offset. + * + * @param size the number of records to return. + * @return the list of records. 
+ */ + protected List createRecords(int size) { + return createRecords(size, 0); + } + + /** + * Return a list of new records starting at the given offset. + * + * @param size the number of records to return. + * @param startOffset the starting offset. + * @return the list of records. + */ + protected List createRecords(int size, long startOffset) { + return createRecords(size, startOffset, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); + } + + /** + * Return a list of new records for a set of partitions, starting at the given offset in each partition. + * + * @param size the number of records to return. + * @param startOffset the starting offset. + * @param partitions the set of partitions to create records for. + * @return the list of records. + */ + protected List createRecords(int size, long startOffset, Set partitions) { + String key = "key"; + Schema schema = createSchema(); + Struct record = createRecord(schema); + + List sinkRecords = new ArrayList<>(); + for (TopicPartition tp : partitions) { + for (long offset = startOffset; offset < startOffset + size; ++offset) { + sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset)); + } + } + return sinkRecords; + } + + protected List createRecordsNoVersion(int size, long startOffset) { + String key = "key"; + Schema schemaNoVersion = SchemaBuilder.struct().name("record") + .field("boolean", Schema.BOOLEAN_SCHEMA) + .field("int", Schema.INT32_SCHEMA) + .field("long", Schema.INT64_SCHEMA) + .field("float", Schema.FLOAT32_SCHEMA) + .field("double", Schema.FLOAT64_SCHEMA) + .build(); + + Struct recordNoVersion = new Struct(schemaNoVersion); + recordNoVersion.put("boolean", true) + .put("int", 12) + .put("long", 12L) + .put("float", 12.2f) + .put("double", 12.2); + + List sinkRecords = new ArrayList<>(); + for (long offset = startOffset; offset < startOffset + size; ++offset) { + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schemaNoVersion, + recordNoVersion, offset)); + } + return sinkRecords; + } + + protected List createRecordsWithAlteringSchemas(int size, long startOffset) { + String key = "key"; + Schema schema = createSchema(); + Struct record = createRecord(schema); + Schema newSchema = createNewSchema(); + Struct newRecord = createNewRecord(newSchema); + + int limit = (size / 2) * 2; + boolean remainder = size % 2 > 0; + List sinkRecords = new ArrayList<>(); + for (long offset = startOffset; offset < startOffset + limit; ++offset) { + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset)); + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, ++offset)); + } + if (remainder) { + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, + startOffset + size - 1)); + } + return sinkRecords; + } + + protected List createRecordsInterleaved(int size, long startOffset, Set partitions) { + String key = "key"; + Schema schema = createSchema(); + Struct record = createRecord(schema); + + List sinkRecords = new ArrayList<>(); + for (long offset = startOffset, total = 0; total < size; ++offset) { + for (TopicPartition tp : partitions) { + sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset)); + if (++total >= size) { + break; + } + } + } + return sinkRecords; + } + + protected String getDirectory() { + return getDirectory(TOPIC, PARTITION); + } + + protected String 
getDirectory(String topic, int partition) { + String encodedPartition = "partition=" + String.valueOf(partition); + return partitioner.generatePartitionedPath(topic, encodedPartition); + } + + /** + * Verify files and records are uploaded appropriately. + * @param sinkRecords a flat list of the records that need to appear in potentially several files in S3. + * @param validOffsets an array containing the offsets that map to uploaded files for a topic-partition. + * Offsets appear in ascending order, the difference between two consecutive offsets + * equals the expected size of the file, and last offset is exclusive. + */ + protected void verify(List sinkRecords, long[] validOffsets) throws IOException { + verify(sinkRecords, validOffsets, Collections.singleton(new TopicPartition(TOPIC, PARTITION)), false); + } + + protected void verify(List sinkRecords, long[] validOffsets, Set partitions) + throws IOException { + verify(sinkRecords, validOffsets, partitions, false); + } + + /** + * Verify files and records are uploaded appropriately. + * @param sinkRecords a flat list of the records that need to appear in potentially several files in S3. + * @param validOffsets an array containing the offsets that map to uploaded files for a topic-partition. + * Offsets appear in ascending order, the difference between two consecutive offsets + * equals the expected size of the file, and last offset is exclusive. + * @param partitions the set of partitions to verify records for. + */ + protected void verify(List sinkRecords, long[] validOffsets, Set partitions, + boolean skipFileListing) throws IOException { + if (!skipFileListing) { + verifyFileListing(validOffsets, partitions); + } + + for (TopicPartition tp : partitions) { + for (int i = 1, j = 0; i < validOffsets.length; ++i) { + long startOffset = validOffsets[i - 1]; + long endOffset = validOffsets[i] - 1; + + String filename = FileUtils.committedFileName(url, topicsDir, getDirectory(tp.topic(), tp.partition()), tp, + startOffset, endOffset, extension, zeroPadFormat); + Path path = new Path(filename); + Collection records = schemaFileReader.readData(conf, path); + + long size = endOffset - startOffset + 1; + assertEquals(size, records.size()); + verifyContents(sinkRecords, j, records); + j += size; + } + } + } + + protected List getExpectedFiles(long[] validOffsets, TopicPartition tp) { + List expectedFiles = new ArrayList<>(); + for (int i = 1; i < validOffsets.length; ++i) { + long startOffset = validOffsets[i - 1]; + long endOffset = validOffsets[i] - 1; + expectedFiles.add(FileUtils.committedFileName(url, topicsDir, getDirectory(tp.topic(), tp.partition()), tp, + startOffset, endOffset, extension, zeroPadFormat)); + } + return expectedFiles; + } + + protected void verifyFileListing(long[] validOffsets, Set partitions) throws IOException { + for (TopicPartition tp : partitions) { + verifyFileListing(getExpectedFiles(validOffsets, tp), tp); + } + } + + protected void verifyFileListing(List expectedFiles, TopicPartition tp) throws IOException { + FileStatus[] statuses = {}; + try { + statuses = fs.listStatus( + new Path(FileUtils.directoryName(url, topicsDir, getDirectory(tp.topic(), tp.partition()))), + new TopicPartitionCommittedFileFilter(tp)); + } catch (FileNotFoundException e) { + // the directory does not exist. 
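+      // Treat a missing directory as an empty listing; the assertion against expectedFiles below still runs.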
+ } + + List actualFiles = new ArrayList<>(); + for (FileStatus status : statuses) { + actualFiles.add(status.getPath().toString()); + } + + Collections.sort(actualFiles); + Collections.sort(expectedFiles); + assertThat(actualFiles, is(expectedFiles)); + } + + protected void verifyContents(List expectedRecords, int startIndex, Collection records) { + Schema expectedSchema = null; + for (Object avroRecord : records) { + if (expectedSchema == null) { + expectedSchema = expectedRecords.get(startIndex).valueSchema(); + } + Object expectedValue = SchemaProjector.project(expectedRecords.get(startIndex).valueSchema(), + expectedRecords.get(startIndex++).value(), + expectedSchema); + assertEquals(avroData.fromConnectData(expectedSchema, expectedValue), avroRecord); + } + } + } diff --git a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java index 1e3d26b..48e73a2 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java @@ -14,50 +14,40 @@ package io.confluent.connect.hdfs.avro; -import io.confluent.connect.hdfs.DataWriter; -import io.confluent.connect.hdfs.FileUtils; -import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; -import io.confluent.connect.hdfs.SchemaFileReader; -import io.confluent.connect.hdfs.TestWithMiniDFSCluster; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.data.SchemaBuilder; -import org.apache.kafka.connect.data.SchemaProjector; -import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.sink.SinkRecord; +import org.junit.Before; import org.junit.Test; -import io.confluent.connect.hdfs.filter.TopicPartitionCommittedFileFilter; -import io.confluent.connect.hdfs.partitioner.Partitioner; -import io.confluent.connect.hdfs.storage.Storage; -import io.confluent.connect.hdfs.storage.StorageFactory; -import io.confluent.connect.hdfs.wal.WAL; - -import java.io.FileNotFoundException; -import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import static org.hamcrest.CoreMatchers.is; +import io.confluent.connect.hdfs.DataWriter; +import io.confluent.connect.hdfs.FileUtils; +import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; +import io.confluent.connect.hdfs.TestWithMiniDFSCluster; +import io.confluent.connect.hdfs.storage.Storage; +import io.confluent.connect.hdfs.storage.StorageFactory; +import io.confluent.connect.hdfs.wal.WAL; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; public class DataWriterAvroTest extends TestWithMiniDFSCluster { - private static final String extension = ".avro"; - private static final String ZERO_PAD_FMT = "%010d"; - private SchemaFileReader schemaFileReader = new AvroFileReader(avroData); - Partitioner partitioner; + @Before + public void setUp() throws Exception { + super.setUp(); + schemaFileReader = new AvroFileReader(avroData); + extension = ".avro"; + } @Test public void testWriteRecord() throws Exception { @@ -97,7 +87,7 @@ public void testRecovery() throws Exception { 
String tempfile = FileUtils.tempFileName(url, topicsDir, getDirectory(), extension); fs.createNewFile(new Path(tempfile)); String committedFile = FileUtils.committedFileName(url, topicsDir, getDirectory(), TOPIC_PARTITION, startOffset, - endOffset, extension, ZERO_PAD_FMT); + endOffset, extension, zeroPadFormat); wal.append(tempfile, committedFile); } wal.append(WAL.endMarker, ""); @@ -146,7 +136,7 @@ public void testGetPreviousOffsets() throws Exception { for (int i = 0; i < startOffsets.length; ++i) { Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffsets[i], - endOffsets[i], extension, ZERO_PAD_FMT)); + endOffsets[i], extension, zeroPadFormat)); fs.createNewFile(path); } Path path = new Path(FileUtils.tempFileName(url, topicsDir, directory, extension)); @@ -362,218 +352,5 @@ public void testFlushPartialFile() throws Exception { hdfsWriter.stop(); } - /** - * Return a list of new records starting at zero offset. - * - * @param size the number of records to return. - * @return the list of records. - */ - protected List createRecords(int size) { - return createRecords(size, 0); - } - - /** - * Return a list of new records starting at the given offset. - * - * @param size the number of records to return. - * @param startOffset the starting offset. - * @return the list of records. - */ - protected List createRecords(int size, long startOffset) { - return createRecords(size, startOffset, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); - } - - /** - * Return a list of new records for a set of partitions, starting at the given offset in each partition. - * - * @param size the number of records to return. - * @param startOffset the starting offset. - * @param partitions the set of partitions to create records for. - * @return the list of records. 
- */ - protected List createRecords(int size, long startOffset, Set partitions) { - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - List sinkRecords = new ArrayList<>(); - for (TopicPartition tp : partitions) { - for (long offset = startOffset; offset < startOffset + size; ++offset) { - sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset)); - } - } - return sinkRecords; - } - - protected List createRecordsNoVersion(int size, long startOffset) { - String key = "key"; - Schema schemaNoVersion = SchemaBuilder.struct().name("record") - .field("boolean", Schema.BOOLEAN_SCHEMA) - .field("int", Schema.INT32_SCHEMA) - .field("long", Schema.INT64_SCHEMA) - .field("float", Schema.FLOAT32_SCHEMA) - .field("double", Schema.FLOAT64_SCHEMA) - .build(); - - Struct recordNoVersion = new Struct(schemaNoVersion); - recordNoVersion.put("boolean", true) - .put("int", 12) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - List sinkRecords = new ArrayList<>(); - for (long offset = startOffset; offset < startOffset + size; ++offset) { - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schemaNoVersion, - recordNoVersion, offset)); - } - return sinkRecords; - } - - protected List createRecordsWithAlteringSchemas(int size, long startOffset) { - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - Schema newSchema = createNewSchema(); - Struct newRecord = createNewRecord(newSchema); - - int limit = (size / 2) * 2; - boolean remainder = size % 2 > 0; - List sinkRecords = new ArrayList<>(); - for (long offset = startOffset; offset < startOffset + limit; ++offset) { - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, ++offset)); - } - if (remainder) { - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, - startOffset + size - 1)); - } - return sinkRecords; - } - - protected List createRecordsInterleaved(int size, long startOffset, Set partitions) { - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - List sinkRecords = new ArrayList<>(); - for (long offset = startOffset, total = 0; total < size; ++offset) { - for (TopicPartition tp : partitions) { - sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset)); - if (++total >= size) { - break; - } - } - } - return sinkRecords; - } - - protected String getDirectory() { - return getDirectory(TOPIC, PARTITION); - } - - protected String getDirectory(String topic, int partition) { - String encodedPartition = "partition=" + String.valueOf(partition); - return partitioner.generatePartitionedPath(topic, encodedPartition); - } - - /** - * Verify files and records are uploaded appropriately. - * @param sinkRecords a flat list of the records that need to appear in potentially several files in S3. - * @param validOffsets an array containing the offsets that map to uploaded files for a topic-partition. - * Offsets appear in ascending order, the difference between two consecutive offsets - * equals the expected size of the file, and last offset is exclusive. 
- */ - protected void verify(List sinkRecords, long[] validOffsets) throws IOException { - verify(sinkRecords, validOffsets, Collections.singleton(new TopicPartition(TOPIC, PARTITION)), false); - } - - protected void verify(List sinkRecords, long[] validOffsets, Set partitions) - throws IOException { - verify(sinkRecords, validOffsets, partitions, false); - } - - /** - * Verify files and records are uploaded appropriately. - * @param sinkRecords a flat list of the records that need to appear in potentially several files in S3. - * @param validOffsets an array containing the offsets that map to uploaded files for a topic-partition. - * Offsets appear in ascending order, the difference between two consecutive offsets - * equals the expected size of the file, and last offset is exclusive. - * @param partitions the set of partitions to verify records for. - */ - protected void verify(List sinkRecords, long[] validOffsets, Set partitions, - boolean skipFileListing) throws IOException { - if (!skipFileListing) { - verifyFileListing(validOffsets, partitions); - } - - for (TopicPartition tp : partitions) { - for (int i = 1, j = 0; i < validOffsets.length; ++i) { - long startOffset = validOffsets[i - 1]; - long endOffset = validOffsets[i] - 1; - - String filename = FileUtils.committedFileName(url, topicsDir, getDirectory(tp.topic(), tp.partition()), tp, - startOffset, endOffset, extension, ZERO_PAD_FMT); - Path path = new Path(filename); - Collection records = schemaFileReader.readData(conf, path); - - long size = endOffset - startOffset + 1; - assertEquals(size, records.size()); - verifyContents(sinkRecords, j, records); - j += size; - } - } - } - - protected List getExpectedFiles(long[] validOffsets, TopicPartition tp) { - List expectedFiles = new ArrayList<>(); - for (int i = 1; i < validOffsets.length; ++i) { - long startOffset = validOffsets[i - 1]; - long endOffset = validOffsets[i] - 1; - expectedFiles.add(FileUtils.committedFileName(url, topicsDir, getDirectory(tp.topic(), tp.partition()), tp, - startOffset, endOffset, extension, ZERO_PAD_FMT)); - } - return expectedFiles; - } - - protected void verifyFileListing(long[] validOffsets, Set partitions) throws IOException { - for (TopicPartition tp : partitions) { - verifyFileListing(getExpectedFiles(validOffsets, tp), tp); - } - } - - protected void verifyFileListing(List expectedFiles, TopicPartition tp) throws IOException { - FileStatus[] statuses = {}; - try { - statuses = fs.listStatus( - new Path(FileUtils.directoryName(url, topicsDir, getDirectory(tp.topic(), tp.partition()))), - new TopicPartitionCommittedFileFilter(tp)); - } catch (FileNotFoundException e) { - // the directory does not exist. 
- } - - List actualFiles = new ArrayList<>(); - for (FileStatus status : statuses) { - actualFiles.add(status.getPath().toString()); - } - - Collections.sort(actualFiles); - Collections.sort(expectedFiles); - assertThat(actualFiles, is(expectedFiles)); - } - - protected void verifyContents(List expectedRecords, int startIndex, Collection records) { - Schema expectedSchema = null; - for (Object avroRecord : records) { - if (expectedSchema == null) { - expectedSchema = expectedRecords.get(startIndex).valueSchema(); - } - Object expectedValue = SchemaProjector.project(expectedRecords.get(startIndex).valueSchema(), - expectedRecords.get(startIndex++).value(), - expectedSchema); - assertEquals(avroData.fromConnectData(expectedSchema, expectedValue), avroRecord); - } - } - } diff --git a/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java b/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java index 21b26f1..3b7ba95 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java @@ -52,13 +52,8 @@ import static org.junit.Assert.assertTrue; public class TopicPartitionWriterTest extends TestWithMiniDFSCluster { - // The default based on default configuration of 10 - private static final String ZERO_PAD_FMT = "%010d"; - private RecordWriterProvider writerProvider; - private SchemaFileReader schemaFileReader; private Storage storage; - private static String extension; @Before public void setUp() throws Exception { @@ -142,9 +137,9 @@ public void testWriteRecordFieldPartitioner() throws Exception { String directory3 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(18)); Set expectedFiles = new HashSet<>(); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory1, TOPIC_PARTITION, 0, 2, extension, ZERO_PAD_FMT))); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory2, TOPIC_PARTITION, 3, 5, extension, ZERO_PAD_FMT))); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory3, TOPIC_PARTITION, 6, 8, extension, ZERO_PAD_FMT))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory1, TOPIC_PARTITION, 0, 2, extension, zeroPadFormat))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory2, TOPIC_PARTITION, 3, 5, extension, zeroPadFormat))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory3, TOPIC_PARTITION, 6, 8, extension, zeroPadFormat))); verify(expectedFiles, records, schema); } @@ -183,9 +178,9 @@ public void testWriteRecordTimeBasedPartition() throws Exception { String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition); Set expectedFiles = new HashSet<>(); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 0, 2, extension, ZERO_PAD_FMT))); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 3, 5, extension, ZERO_PAD_FMT))); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 6, 8, extension, ZERO_PAD_FMT))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 0, 2, extension, zeroPadFormat))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 3, 5, extension, zeroPadFormat))); + 
expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 6, 8, extension, zeroPadFormat))); verify(expectedFiles, records, schema); } diff --git a/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java b/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java index 0a0c399..6c71336 100644 --- a/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java +++ b/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java @@ -15,29 +15,26 @@ package io.confluent.connect.hdfs.parquet; -import org.apache.hadoop.fs.Path; -import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.sink.SinkRecord; +import org.junit.Before; import org.junit.Test; -import java.util.ArrayList; -import java.util.Collection; +import java.util.List; import java.util.Map; import io.confluent.connect.hdfs.DataWriter; -import io.confluent.connect.hdfs.FileUtils; import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; -import io.confluent.connect.hdfs.SchemaFileReader; import io.confluent.connect.hdfs.TestWithMiniDFSCluster; import io.confluent.connect.hdfs.partitioner.Partitioner; -import static org.junit.Assert.assertEquals; - public class DataWriterParquetTest extends TestWithMiniDFSCluster { - private static final String ZERO_PAD_FMT = "%010d"; - private static final String extension = ".parquet"; - private final SchemaFileReader schemaFileReader = new ParquetFileReader(avroData); + + @Before + public void setUp() throws Exception { + super.setUp(); + schemaFileReader = new ParquetFileReader(avroData); + extension = ".parquet"; + } @Override protected Map createProps() { @@ -49,41 +46,17 @@ protected Map createProps() { @Test public void testWriteRecord() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); - Partitioner partitioner = hdfsWriter.getPartitioner(); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); + List sinkRecords = createRecords(7); - Collection sinkRecords = new ArrayList<>(); - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); - - sinkRecords.add(sinkRecord); - } hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - String encodedPartition = "partition=" + String.valueOf(PARTITION); - String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition); - // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close - long[] validOffsets = {-1, 2, 5}; - for (int i = 1; i < validOffsets.length; i++) { - long startOffset = validOffsets[i - 1] + 1; - long endOffset = validOffsets[i]; - Path path = new Path( - FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffset, - endOffset, extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(size, records.size()); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } + long[] validOffsets = {0, 3, 6}; + verify(sinkRecords, validOffsets); } } From e939f757e60c7c02e638e7c6e5ac91d9f5f8179d Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Fri, 21 Apr 2017 10:39:47 -0700 
Subject: [PATCH 35/62] Consolidate records creation methods in TopicPartitionWriterTest too. --- .../hdfs/HdfsSinkConnectorTestBase.java | 40 ++++++-- .../connect/hdfs/TestWithMiniDFSCluster.java | 44 +++++++-- .../connect/hdfs/avro/DataWriterAvroTest.java | 24 ++--- .../hdfs/avro/TopicPartitionWriterTest.java | 95 ++++++------------- .../hdfs/parquet/DataWriterParquetTest.java | 2 +- 5 files changed, 111 insertions(+), 94 deletions(-) diff --git a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java index 039fc1d..b1e6158 100644 --- a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java +++ b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java @@ -25,8 +25,10 @@ import org.junit.After; import org.junit.Before; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -72,13 +74,18 @@ protected Schema createSchema() { .build(); } - protected Struct createRecord(Schema schema) { + // Create a batch of records with incremental numeric field values. Total number of records is given by 'size'. + protected Struct createRecord(Schema schema, int ibase, float fbase) { return new Struct(schema) - .put("boolean", true) - .put("int", 12) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); + .put("boolean", true) + .put("int", ibase) + .put("long", (long) ibase) + .put("float", fbase) + .put("double", (double) fbase); + } + + protected Struct createRecord(Schema schema) { + return createRecord(schema, 12, 12.2f); } protected Schema createNewSchema() { @@ -102,6 +109,27 @@ protected Struct createNewRecord(Schema newSchema) { .put("string", "def"); } + // Create a batch of records with incremental numeric field values. Total number of records is given by 'size'. + protected List createRecordBatch(Schema schema, int size) { + ArrayList records = new ArrayList<>(size); + int ibase = 16; + float fbase = 12.2f; + + for (int i = 0; i < size; ++i) { + records.add(createRecord(schema, ibase + i, fbase + i)); + } + return records; + } + + // Create a list of records by repeating the same record batch. Total number of records: 'batchesNum' x 'batchSize' + protected List createRecordBatches(Schema schema, int batchSize, int batchesNum) { + ArrayList records = new ArrayList<>(); + for (int i = 0; i < batchesNum; ++i) { + records.addAll(createRecordBatch(schema, batchSize)); + } + return records; + } + @Before public void setUp() throws Exception { conf = new Configuration(); diff --git a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java index efc92ea..abc658e 100644 --- a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java +++ b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java @@ -96,15 +96,14 @@ protected Map createProps() { return props; } - /** * Return a list of new records starting at zero offset. * * @param size the number of records to return. * @return the list of records. */ - protected List createRecords(int size) { - return createRecords(size, 0); + protected List createSinkRecords(int size) { + return createSinkRecords(size, 0); } /** @@ -114,8 +113,8 @@ protected List createRecords(int size) { * @param startOffset the starting offset. * @return the list of records. 
*/ - protected List createRecords(int size, long startOffset) { - return createRecords(size, startOffset, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); + protected List createSinkRecords(int size, long startOffset) { + return createSinkRecords(size, startOffset, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); } /** @@ -126,7 +125,8 @@ protected List createRecords(int size, long startOffset) { * @param partitions the set of partitions to create records for. * @return the list of records. */ - protected List createRecords(int size, long startOffset, Set partitions) { + protected List createSinkRecords(int size, long startOffset, Set partitions) { + /* String key = "key"; Schema schema = createSchema(); Struct record = createRecord(schema); @@ -138,9 +138,35 @@ protected List createRecords(int size, long startOffset, Set same = new ArrayList<>(); + for (int i = 0; i < size; ++i) { + same.add(record); + } + return createSinkRecords(same, schema, startOffset, partitions); + } + + protected List createSinkRecords(List records, Schema schema) { + return createSinkRecords(records, schema, 0, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); + } + + protected List createSinkRecords(List records, Schema schema, long startOffset, + Set partitions) { + String key = "key"; + List sinkRecords = new ArrayList<>(); + for (TopicPartition tp : partitions) { + long offset = startOffset; + for (Struct record : records) { + sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset++)); + } + } + return sinkRecords; } - protected List createRecordsNoVersion(int size, long startOffset) { + protected List createSinkRecordsNoVersion(int size, long startOffset) { String key = "key"; Schema schemaNoVersion = SchemaBuilder.struct().name("record") .field("boolean", Schema.BOOLEAN_SCHEMA) @@ -165,7 +191,7 @@ protected List createRecordsNoVersion(int size, long startOffset) { return sinkRecords; } - protected List createRecordsWithAlteringSchemas(int size, long startOffset) { + protected List createSinkRecordsWithAlteringSchemas(int size, long startOffset) { String key = "key"; Schema schema = createSchema(); Struct record = createRecord(schema); @@ -186,7 +212,7 @@ protected List createRecordsWithAlteringSchemas(int size, long start return sinkRecords; } - protected List createRecordsInterleaved(int size, long startOffset, Set partitions) { + protected List createSinkRecordsInterleaved(int size, long startOffset, Set partitions) { String key = "key"; Schema schema = createSchema(); Struct record = createRecord(schema); diff --git a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java index 48e73a2..d0f4615 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java @@ -55,7 +55,7 @@ public void testWriteRecord() throws Exception { partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - List sinkRecords = createRecords(7); + List sinkRecords = createSinkRecords(7); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -98,7 +98,7 @@ public void testRecovery() throws Exception { assertTrue(offsets.containsKey(TOPIC_PARTITION)); assertEquals(50L, (long) offsets.get(TOPIC_PARTITION)); - List sinkRecords = createRecords(3, 50); + List sinkRecords = createSinkRecords(3, 50); hdfsWriter.write(sinkRecords); 
hdfsWriter.close(assignment); @@ -117,7 +117,7 @@ public void testWriteRecordMultiplePartitions() throws Exception { hdfsWriter.recover(tp); } - List sinkRecords = createRecords(7, 0, assignment); + List sinkRecords = createSinkRecords(7, 0, assignment); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -164,7 +164,7 @@ public void testWriteRecordNonZeroInitialOffset() throws Exception { partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - List sinkRecords = createRecords(7, 3); + List sinkRecords = createSinkRecords(7, 3); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -185,7 +185,7 @@ public void testRebalance() throws Exception { hdfsWriter.recover(tp); } - List sinkRecords = createRecords(7, 0, assignment); + List sinkRecords = createSinkRecords(7, 0, assignment); hdfsWriter.write(sinkRecords); Set oldAssignment = new HashSet<>(assignment); @@ -207,7 +207,7 @@ public void testRebalance() throws Exception { verify(sinkRecords, validOffsetsTopicPartition2, Collections.singleton(TOPIC_PARTITION2), true); // Message offsets start at 6 because we discarded the in-progress temp file on re-balance - sinkRecords = createRecords(3, 6, assignment); + sinkRecords = createSinkRecords(3, 6, assignment); hdfsWriter.write(sinkRecords); hdfsWriter.close(newAssignment); @@ -234,7 +234,7 @@ public void testProjectBackWard() throws Exception { partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - List sinkRecords = createRecordsWithAlteringSchemas(7, 0); + List sinkRecords = createSinkRecordsWithAlteringSchemas(7, 0); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -253,7 +253,7 @@ public void testProjectNone() throws Exception { partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - List sinkRecords = createRecordsWithAlteringSchemas(7, 0); + List sinkRecords = createSinkRecordsWithAlteringSchemas(7, 0); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -275,7 +275,7 @@ public void testProjectForward() throws Exception { hdfsWriter.recover(TOPIC_PARTITION); // By excluding the first element we get a list starting with record having the new schema. 
- List sinkRecords = createRecordsWithAlteringSchemas(8, 0).subList(1, 8); + List sinkRecords = createSinkRecordsWithAlteringSchemas(8, 0).subList(1, 8); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -295,8 +295,8 @@ public void testProjectNoVersion() throws Exception { partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - List sinkRecords = createRecordsNoVersion(1, 0); - sinkRecords.addAll(createRecordsWithAlteringSchemas(7, 0)); + List sinkRecords = createSinkRecordsNoVersion(1, 0); + sinkRecords.addAll(createSinkRecordsWithAlteringSchemas(7, 0)); try { hdfsWriter.write(sinkRecords); @@ -332,7 +332,7 @@ public void testFlushPartialFile() throws Exception { partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - List sinkRecords = createRecords(NUMBER_OF_RECORDS); + List sinkRecords = createSinkRecords(NUMBER_OF_RECORDS); hdfsWriter.write(sinkRecords); // wait for rotation to happen diff --git a/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java b/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java index 3b7ba95..8cf84ea 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -36,7 +37,6 @@ import io.confluent.connect.hdfs.Format; import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; import io.confluent.connect.hdfs.RecordWriterProvider; -import io.confluent.connect.hdfs.SchemaFileReader; import io.confluent.connect.hdfs.TestWithMiniDFSCluster; import io.confluent.connect.hdfs.TopicPartitionWriter; import io.confluent.connect.hdfs.filter.CommittedFileFilter; @@ -81,11 +81,11 @@ public void testWriteRecordDefaultWithPadding() throws Exception { TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter( TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, context, avroData); - String key = "key"; Schema schema = createSchema(); - Struct[] records = createRecords(schema); - - Collection sinkRecords = createSinkRecords(records, key, schema); + List records = createRecordBatches(schema, 3, 3); + // Add a single records at the end of the batches sequence. 
Total records: 10 + records.add(createRecord(schema)); + List sinkRecords = createSinkRecords(records, schema); for (SinkRecord record : sinkRecords) { topicPartitionWriter.buffer(record); @@ -102,7 +102,8 @@ public void testWriteRecordDefaultWithPadding() throws Exception { "/" + TOPIC + "+" + PARTITION + "+03+05" + extension)); expectedFiles.add(new Path(url + "/" + topicsDir + "/" + TOPIC + "/partition=" + PARTITION + "/" + TOPIC + "+" + PARTITION + "+06+08" + extension)); - verify(expectedFiles, records, schema); + int expectedBatchSize = 3; + verify(expectedFiles, expectedBatchSize, records, schema); } @@ -117,11 +118,17 @@ public void testWriteRecordFieldPartitioner() throws Exception { TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter( TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, context, avroData); - String key = "key"; Schema schema = createSchema(); - Struct[] records = createRecords(schema); + List records = new ArrayList<>(); + for (int i = 16; i < 19; ++i) { + for (int j = 0; j < 3; ++j) { + records.add(createRecord(schema, i, 12.2f)); - Collection sinkRecords = createSinkRecords(records, key, schema); + } + } + // Add a single records at the end of the batches sequence. Total records: 10 + records.add(createRecord(schema)); + List sinkRecords = createSinkRecords(records, schema); for (SinkRecord record : sinkRecords) { topicPartitionWriter.buffer(record); @@ -131,7 +138,6 @@ public void testWriteRecordFieldPartitioner() throws Exception { topicPartitionWriter.write(); topicPartitionWriter.close(); - String directory1 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(16)); String directory2 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(17)); String directory3 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(18)); @@ -141,7 +147,8 @@ public void testWriteRecordFieldPartitioner() throws Exception { expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory2, TOPIC_PARTITION, 3, 5, extension, zeroPadFormat))); expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory3, TOPIC_PARTITION, 6, 8, extension, zeroPadFormat))); - verify(expectedFiles, records, schema); + int expectedBatchSize = 3; + verify(expectedFiles, expectedBatchSize, records, schema); } @Test @@ -153,11 +160,11 @@ public void testWriteRecordTimeBasedPartition() throws Exception { TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter( TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, context, avroData); - String key = "key"; Schema schema = createSchema(); - Struct[] records = createRecords(schema); - - Collection sinkRecords = createSinkRecords(records, key, schema); + List records = createRecordBatches(schema, 3, 3); + // Add a single records at the end of the batches sequence. 
Total records: 10 + records.add(createRecord(schema)); + List sinkRecords = createSinkRecords(records, schema); for (SinkRecord record : sinkRecords) { topicPartitionWriter.buffer(record); @@ -167,7 +174,6 @@ public void testWriteRecordTimeBasedPartition() throws Exception { topicPartitionWriter.write(); topicPartitionWriter.close(); - long partitionDurationMs = (Long) config.get(HdfsSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG); String pathFormat = (String) config.get(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG); String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG); @@ -182,7 +188,8 @@ public void testWriteRecordTimeBasedPartition() throws Exception { expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 3, 5, extension, zeroPadFormat))); expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 6, 8, extension, zeroPadFormat))); - verify(expectedFiles, records, schema); + int expectedBatchSize = 3; + verify(expectedFiles, expectedBatchSize, records, schema); } private Map createConfig() { @@ -209,51 +216,7 @@ private void createLogsDir(String url, String logsDir) throws IOException { } } - private Struct[] createRecords(Schema schema) { - Struct record1 = new Struct(schema) - .put("boolean", true) - .put("int", 16) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - Struct record2 = new Struct(schema) - .put("boolean", true) - .put("int", 17) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - Struct record3 = new Struct(schema) - .put("boolean", true) - .put("int", 18) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - ArrayList records = new ArrayList<>(); - records.add(record1); - records.add(record2); - records.add(record3); - return records.toArray(new Struct[records.size()]); - } - - - private ArrayList createSinkRecords(Struct[] records, String key, Schema schema) { - ArrayList sinkRecords = new ArrayList<>(); - long offset = 0; - for (Struct record : records) { - for (long count = 0; count < 3; count++) { - SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, - offset + count); - sinkRecords.add(sinkRecord); - } - offset = offset + 3; - } - return sinkRecords; - } - - private void verify(Set expectedFiles, Struct[] records, Schema schema) throws IOException { + private void verify(Set expectedFiles, int expectedSize, List records, Schema schema) throws IOException { Path path = new Path(FileUtils.topicDirectory(url, topicsDir, TOPIC)); FileStatus[] statuses = FileUtils.traverse(storage, path, new CommittedFileFilter()); assertEquals(expectedFiles.size(), statuses.length); @@ -262,11 +225,11 @@ private void verify(Set expectedFiles, Struct[] records, Schema schema) th Path filePath = status.getPath(); assertTrue(expectedFiles.contains(status.getPath())); Collection avroRecords = schemaFileReader.readData(conf, filePath); - assertEquals(3, avroRecords.size()); - for (Object avroRecord: avroRecords) { - assertEquals(avroData.fromConnectData(schema, records[index]), avroRecord); + assertEquals(expectedSize, avroRecords.size()); + for (Object avroRecord : avroRecords) { + assertEquals(avroData.fromConnectData(schema, records.get(index++)), avroRecord); } - index++; } } + } diff --git a/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java b/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java index 6c71336..ac4ae99 
100644 --- a/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java +++ b/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java @@ -49,7 +49,7 @@ public void testWriteRecord() throws Exception { partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - List sinkRecords = createRecords(7); + List sinkRecords = createSinkRecords(7); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); From b87115fed8eb9522a0ce453fd29dd00456460028 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Fri, 21 Apr 2017 22:23:57 -0700 Subject: [PATCH 36/62] Fix hive tests. --- .../connect/hdfs/avro/AvroHiveUtilTest.java | 41 +++-- .../parquet/HiveIntegrationParquetTest.java | 148 ++++++------------ .../hdfs/parquet/ParquetHiveUtilTest.java | 31 ++-- 3 files changed, 85 insertions(+), 135 deletions(-) diff --git a/src/test/java/io/confluent/connect/hdfs/avro/AvroHiveUtilTest.java b/src/test/java/io/confluent/connect/hdfs/avro/AvroHiveUtilTest.java index 52d553b..74322e1 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/AvroHiveUtilTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/AvroHiveUtilTest.java @@ -26,7 +26,6 @@ import org.junit.Test; import java.util.ArrayList; -import java.util.Collection; import java.util.List; import io.confluent.connect.avro.AvroData; @@ -58,14 +57,17 @@ public void testCreateTable() throws Exception { String location = "partition=" + String.valueOf(PARTITION); hiveMetaStore.addPartition(hiveDatabase, TOPIC, location); + Struct expectedRecord = createRecord(schema); + List expectedResult = new ArrayList<>(); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); + expectedResult.add(String.valueOf(expectedRecord.get(field.name()))); } Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); List actualColumnNames = new ArrayList<>(); - for (FieldSchema column: table.getSd().getCols()) { + for (FieldSchema column : table.getSd().getCols()) { actualColumnNames.add(column.getName()); } @@ -74,15 +76,15 @@ public void testCreateTable() throws Exception { assertEquals(1, partitionCols.size()); assertEquals("partition", partitionCols.get(0).getName()); - String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "12"}; String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC); String[] rows = result.split("\n"); // Only 6 of the 7 records should have been delivered due to flush_size = 3 assertEquals(6, rows.length); - for (String row: rows) { + for (String row : rows) { String[] parts = HiveTestUtils.parseOutput(row); - for (int j = 0; j < expectedResult.length; ++j) { - assertEquals(expectedResult[j], parts[j]); + int j = 0; + for (String expectedValue : expectedResult) { + assertEquals(expectedValue, parts[j++]); } } } @@ -97,14 +99,17 @@ public void testAlterSchema() throws Exception { String location = "partition=" + String.valueOf(PARTITION); hiveMetaStore.addPartition(hiveDatabase, TOPIC, location); + Struct expectedRecord = createRecord(schema); + List expectedResult = new ArrayList<>(); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); + expectedResult.add(String.valueOf(expectedRecord.get(field.name()))); } Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); List actualColumnNames = new ArrayList<>(); - for (FieldSchema column: 
table.getSd().getCols()) { + for (FieldSchema column : table.getSd().getCols()) { actualColumnNames.add(column.getName()); } @@ -114,15 +119,15 @@ public void testAlterSchema() throws Exception { hive.alterSchema(hiveDatabase, TOPIC, newSchema); - String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "abc", "12"}; String result = HiveTestUtils.runHive(hiveExec, "SELECT * from " + TOPIC); String[] rows = result.split("\n"); // Only 6 of the 7 records should have been delivered due to flush_size = 3 assertEquals(6, rows.length); - for (String row: rows) { + for (String row : rows) { String[] parts = HiveTestUtils.parseOutput(row); - for (int j = 0; j < expectedResult.length; ++j) { - assertEquals(expectedResult[j], parts[j]); + int j = 0; + for (String expectedValue : expectedResult) { + assertEquals(expectedValue, parts[j++]); } } } @@ -131,16 +136,8 @@ private void prepareData(String topic, int partition) throws Exception { TopicPartition tp = new TopicPartition(topic, partition); DataWriter hdfsWriter = createWriter(context, avroData); hdfsWriter.recover(tp); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - Collection sinkRecords = new ArrayList<>(); - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(topic, partition, Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } + List sinkRecords = createSinkRecords(7); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); diff --git a/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java b/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java index 6bf4a32..5a7601e 100644 --- a/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java +++ b/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java @@ -25,7 +25,7 @@ import org.junit.Test; import java.util.ArrayList; -import java.util.Collection; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -58,16 +58,7 @@ public void testSyncWithHiveParquet() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Collection sinkRecords = new ArrayList<>(); - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } + List sinkRecords = createSinkRecords(7); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -80,14 +71,18 @@ public void testSyncWithHiveParquet() throws Exception { hdfsWriter = new DataWriter(config, context, avroData); hdfsWriter.syncWithHive(); + Schema schema = createSchema(); + Struct expectedRecord = createRecord(schema); + List expectedResult = new ArrayList<>(); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); + expectedResult.add(String.valueOf(expectedRecord.get(field.name()))); } Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); List actualColumnNames = new ArrayList<>(); - for (FieldSchema column: table.getSd().getCols()) { + for (FieldSchema column : table.getSd().getCols()) { actualColumnNames.add(column.getName()); } 
assertEquals(expectedColumnNames, actualColumnNames); @@ -113,29 +108,21 @@ public void testHiveIntegrationParquet() throws Exception { DataWriter hdfsWriter = new DataWriter(config, context, avroData); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Collection sinkRecords = new ArrayList<>(); - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); + List sinkRecords = createSinkRecords(7); - sinkRecords.add(sinkRecord); - } hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); + Schema schema = createSchema(); Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); } List actualColumnNames = new ArrayList<>(); - for (FieldSchema column: table.getSd().getCols()) { + for (FieldSchema column : table.getSd().getCols()) { actualColumnNames.add(column.getName()); } assertEquals(expectedColumnNames, actualColumnNames); @@ -159,20 +146,9 @@ public void testHiveIntegrationFieldPartitionerParquet() throws Exception { HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = new DataWriter(config, context, avroData); - String key = "key"; Schema schema = createSchema(); - - Struct[] records = createRecords(schema); - ArrayList sinkRecords = new ArrayList<>(); - long offset = 0; - for (Struct record : records) { - for (long count = 0; count < 3; count++) { - SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, - offset + count); - sinkRecords.add(sinkRecord); - } - offset = offset + 3; - } + List records = createRecordBatches(schema, 3, 3); + List sinkRecords = createSinkRecords(records, schema); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -181,17 +157,16 @@ public void testHiveIntegrationFieldPartitionerParquet() throws Exception { Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); } List actualColumnNames = new ArrayList<>(); - for (FieldSchema column: table.getSd().getCols()) { + for (FieldSchema column : table.getSd().getCols()) { actualColumnNames.add(column.getName()); } assertEquals(expectedColumnNames, actualColumnNames); - String partitionFieldName = config.getString(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG); String directory1 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(16); String directory2 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(17); @@ -206,20 +181,25 @@ public void testHiveIntegrationFieldPartitionerParquet() throws Exception { assertEquals(expectedPartitions, partitions); - ArrayList expectedResult = new ArrayList<>(); - for (int i = 16; i <= 18; ++i) { - String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2"}; + List> expectedResults = new ArrayList<>(); + for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { - expectedResult.add(part); + List result = new ArrayList<>(); + for (Field field : schema.fields()) { + result.add(String.valueOf(records.get(i).get(field.name()))); + } + expectedResults.add(result); } } + String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + 
TOPIC); String[] rows = result.split("\n"); assertEquals(9, rows.length); for (int i = 0; i < rows.length; ++i) { String[] parts = HiveTestUtils.parseOutput(rows[i]); - for (int j = 0; j < expectedResult.get(i).length; ++j) { - assertEquals(expectedResult.get(i)[j], parts[j]); + int j = 0; + for (String expectedValue : expectedResults.get(i)) { + assertEquals(expectedValue, parts[j++]); } } } @@ -235,20 +215,9 @@ public void testHiveIntegrationTimeBasedPartitionerParquet() throws Exception { HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = new DataWriter(config, context, avroData); - String key = "key"; Schema schema = createSchema(); - - Struct[] records = createRecords(schema); - ArrayList sinkRecords = new ArrayList<>(); - long offset = 0; - for (Struct record : records) { - for (long count = 0; count < 3; count++) { - SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, - offset + count); - sinkRecords.add(sinkRecord); - } - offset = offset + 3; - } + List records = createRecordBatches(schema, 3, 3); + List sinkRecords = createSinkRecords(records, schema); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -257,12 +226,12 @@ public void testHiveIntegrationTimeBasedPartitionerParquet() throws Exception { Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); } List actualColumnNames = new ArrayList<>(); - for (FieldSchema column: table.getSd().getCols()) { + for (FieldSchema column : table.getSd().getCols()) { actualColumnNames.add(column.getName()); } assertEquals(expectedColumnNames, actualColumnNames); @@ -281,17 +250,23 @@ public void testHiveIntegrationTimeBasedPartitionerParquet() throws Exception { ArrayList partitionFields = new ArrayList<>(); String[] groups = encodedPartition.split("/"); - for (String group: groups) { + for (String group : groups) { String field = group.split("=")[1]; partitionFields.add(field); } - ArrayList expectedResult = new ArrayList<>(); - for (int i = 16; i <= 18; ++i) { - String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2", - partitionFields.get(0), partitionFields.get(1), partitionFields.get(2)}; - for (int j = 0; j < 3; ++j) { - expectedResult.add(part); + List> expectedResults = new ArrayList<>(); + for (int j = 0; j < 3; ++j) { + for (int i = 0; i < 3; ++i) { + List result = Arrays.asList("true", + String.valueOf(16 + i), + String.valueOf((long) (16 + i)), + String.valueOf(12.2f + i), + String.valueOf((double) (12.2f + i)), + partitionFields.get(0), + partitionFields.get(1), + partitionFields.get(2)); + expectedResults.add(result); } } @@ -300,38 +275,11 @@ public void testHiveIntegrationTimeBasedPartitionerParquet() throws Exception { assertEquals(9, rows.length); for (int i = 0; i < rows.length; ++i) { String[] parts = HiveTestUtils.parseOutput(rows[i]); - for (int j = 0; j < expectedResult.get(i).length; ++j) { - assertEquals(expectedResult.get(i)[j], parts[j]); + int j = 0; + for (String expectedValue : expectedResults.get(i)) { + System.err.println("Exp: " + expectedValue + " Actual: " + parts[j]); + assertEquals(expectedValue, parts[j++]); } } } - - private Struct[] createRecords(Schema schema) { - Struct record1 = new Struct(schema) - .put("boolean", true) - .put("int", 16) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - Struct record2 = 
new Struct(schema) - .put("boolean", true) - .put("int", 17) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - Struct record3 = new Struct(schema) - .put("boolean", true) - .put("int", 18) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - ArrayList records = new ArrayList<>(); - records.add(record1); - records.add(record2); - records.add(record3); - return records.toArray(new Struct[records.size()]); - } } diff --git a/src/test/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtilTest.java b/src/test/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtilTest.java index 1df3455..2467cf9 100644 --- a/src/test/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtilTest.java +++ b/src/test/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtilTest.java @@ -69,9 +69,12 @@ public void testCreateTable() throws Exception { String location = "partition=" + String.valueOf(PARTITION); hiveMetaStore.addPartition(hiveDatabase, TOPIC, location); + Struct expectedRecord = createRecord(schema); + List expectedResult = new ArrayList<>(); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); + expectedResult.add(String.valueOf(expectedRecord.get(field.name()))); } Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); @@ -85,15 +88,15 @@ public void testCreateTable() throws Exception { assertEquals(1, partitionCols.size()); assertEquals("partition", partitionCols.get(0).getName()); - String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "12"}; - String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC); + String result = HiveTestUtils.runHive(hiveExec, "SELECT * from " + TOPIC); String[] rows = result.split("\n"); // Only 6 of the 7 records should have been delivered due to flush_size = 3 assertEquals(6, rows.length); - for (String row: rows) { + for (String row : rows) { String[] parts = HiveTestUtils.parseOutput(row); - for (int j = 0; j < expectedResult.length; ++j) { - assertEquals(expectedResult[j], parts[j]); + int j = 0; + for (String expectedValue : expectedResult) { + assertEquals(expectedValue, parts[j++]); } } } @@ -108,9 +111,13 @@ public void testAlterSchema() throws Exception { String location = "partition=" + String.valueOf(PARTITION); hiveMetaStore.addPartition(hiveDatabase, TOPIC, location); + Schema newSchema = createNewSchema(); + Struct expectedRecord = createRecord(newSchema); + List expectedResult = new ArrayList<>(); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); + expectedResult.add(String.valueOf(expectedRecord.get(field.name()))); } Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); @@ -121,19 +128,17 @@ public void testAlterSchema() throws Exception { assertEquals(expectedColumnNames, actualColumnNames); - Schema newSchema = createNewSchema(); - hive.alterSchema(hiveDatabase, TOPIC, newSchema); - String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "NULL", "12"}; String result = HiveTestUtils.runHive(hiveExec, "SELECT * from " + TOPIC); String[] rows = result.split("\n"); // Only 6 of the 7 records should have been delivered due to flush_size = 3 assertEquals(6, rows.length); - for (String row: rows) { + for (String row : rows) { String[] parts = HiveTestUtils.parseOutput(row); - for (int j = 0; j < expectedResult.length; ++j) { - assertEquals(expectedResult[j], 
parts[j]); + int j = 0; + for (String expectedValue : expectedResult) { + assertEquals(expectedValue, parts[j++]); } } } From 66ddb58c82dbb35f8ab5d05f9c5390b005ae9de8 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Fri, 21 Apr 2017 22:34:43 -0700 Subject: [PATCH 37/62] Add tests with interleaved (non consecutive) records between partitions. --- .../connect/hdfs/avro/DataWriterAvroTest.java | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java index d0f4615..c7352c6 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java @@ -128,6 +128,40 @@ public void testWriteRecordMultiplePartitions() throws Exception { verify(sinkRecords, validOffsets, assignment); } + @Test + public void testWriteInterleavedRecordsInMultiplePartitions() throws Exception { + DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); + + for (TopicPartition tp: assignment) { + hdfsWriter.recover(tp); + } + + List sinkRecords = createSinkRecordsInterleaved(7 * assignment.size(), 0, assignment); + + hdfsWriter.write(sinkRecords); + hdfsWriter.close(assignment); + hdfsWriter.stop(); + + long[] validOffsets = {0, 3, 6}; + verify(sinkRecords, validOffsets, assignment); + } + + @Test + public void testWriteInterleavedRecordsInMultiplePartitionsNonZeroInitialOffset() throws Exception { + DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); + + List sinkRecords = createSinkRecordsInterleaved(7 * assignment.size(), 9, assignment); + + hdfsWriter.write(sinkRecords); + hdfsWriter.close(assignment); + hdfsWriter.stop(); + + long[] validOffsets = {9, 12, 15}; + verify(sinkRecords, validOffsets, assignment); + } + @Test public void testGetPreviousOffsets() throws Exception { String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); From 815a693b596360bc96c9b532f5dbfc357ab33446 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Fri, 21 Apr 2017 22:45:04 -0700 Subject: [PATCH 38/62] Code style fixes. --- .../hdfs/HdfsSinkConnectorTestBase.java | 11 +++++----- .../connect/hdfs/TestWithMiniDFSCluster.java | 20 +++++++++++-------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java index b1e6158..4145ef8 100644 --- a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java +++ b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java @@ -56,7 +56,6 @@ public class HdfsSinkConnectorTestBase { protected static final TopicPartition TOPIC_WITH_DOTS_PARTITION = new TopicPartition(TOPIC_WITH_DOTS, PARTITION); protected static Set assignment; - protected Map createProps() { Map props = new HashMap<>(); props.put(HdfsSinkConnectorConfig.HDFS_URL_CONFIG, url); @@ -77,11 +76,11 @@ protected Schema createSchema() { // Create a batch of records with incremental numeric field values. Total number of records is given by 'size'. 
protected Struct createRecord(Schema schema, int ibase, float fbase) { return new Struct(schema) - .put("boolean", true) - .put("int", ibase) - .put("long", (long) ibase) - .put("float", fbase) - .put("double", (double) fbase); + .put("boolean", true) + .put("int", ibase) + .put("long", (long) ibase) + .put("float", fbase) + .put("double", (double) fbase); } protected Struct createRecord(Schema schema) { diff --git a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java index abc658e..df97afb 100644 --- a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java +++ b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java @@ -240,10 +240,12 @@ protected String getDirectory(String topic, int partition) { /** * Verify files and records are uploaded appropriately. - * @param sinkRecords a flat list of the records that need to appear in potentially several files in S3. - * @param validOffsets an array containing the offsets that map to uploaded files for a topic-partition. - * Offsets appear in ascending order, the difference between two consecutive offsets - * equals the expected size of the file, and last offset is exclusive. + * + * @param sinkRecords a flat list of the records that need to appear in potentially several files + * in HDFS. + * @param validOffsets an array containing the offsets that map to uploaded files for a + * topic-partition. Offsets appear in ascending order, the difference between two consecutive + * offsets equals the expected size of the file, and last offset is exclusive. */ protected void verify(List sinkRecords, long[] validOffsets) throws IOException { verify(sinkRecords, validOffsets, Collections.singleton(new TopicPartition(TOPIC, PARTITION)), false); @@ -256,10 +258,12 @@ protected void verify(List sinkRecords, long[] validOffsets, Set sinkRecords, long[] validOffsets, Set partitions, From e57fed9d56fd2add5cf984499acab92593c0c3cd Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Mon, 28 Nov 2016 10:01:44 -0800 Subject: [PATCH 39/62] Increase maximum perm size to accommodate failing tests. --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 5af1775..c8a4255 100644 --- a/pom.xml +++ b/pom.xml @@ -223,7 +223,7 @@ maven-surefire-plugin 2.18.1 - -Djava.awt.headless=true + -Djava.awt.headless=true -XX:MaxPermSize=512m pertest From d449c4f0263d3d0c00a144a2fc995b06d5687c4a Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Wed, 14 Dec 2016 15:35:18 -0800 Subject: [PATCH 40/62] Upgrade surefire parameters for test forking. --- pom.xml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index c8a4255..31a74bc 100644 --- a/pom.xml +++ b/pom.xml @@ -224,7 +224,8 @@ 2.18.1 -Djava.awt.headless=true -XX:MaxPermSize=512m - pertest + false + 1 From b8a335bfc7e3fc5709455e1347d897cc51aca922 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Tue, 25 Apr 2017 17:30:14 -0700 Subject: [PATCH 41/62] Clean commented out code and fix typo. 
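For reference, the ``validOffsets`` convention documented in ``TestWithMiniDFSCluster.verify()`` (patch 38 above) can be restated as a minimal standalone sketch. This is illustrative only and not part of any patch in this series; the class name is made up and the offsets are just an example. Each adjacent pair of offsets bounds one committed file, and the last offset is exclusive.

.. sourcecode:: java

    // Illustrative sketch: restates the validOffsets convention from the
    // verify() javadoc in TestWithMiniDFSCluster (patch 38). Offsets are
    // ascending, each adjacent pair bounds one committed file, and the last
    // offset is exclusive.
    public class ValidOffsetsSketch {
        public static void main(String[] args) {
            long[] validOffsets = {0, 3, 6};  // e.g. two committed files of three records each
            for (int i = 1; i < validOffsets.length; i++) {
                long start = validOffsets[i - 1];     // first offset in the file
                long endExclusive = validOffsets[i];  // one past the last offset in the file
                System.out.printf("file %d covers offsets [%d, %d), %d records%n",
                        i, start, endExclusive, endExclusive - start);
            }
        }
    }

With ``{0, 3, 6}`` this prints two files covering offsets [0, 3) and [3, 6), matching the tests above in which only 6 of 7 records are committed because the flush size is 3.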
--- .../connect/hdfs/HdfsSinkConnectorTestBase.java | 4 ++-- .../connect/hdfs/TestWithMiniDFSCluster.java | 16 +--------------- .../connect/hdfs/avro/DataWriterAvroTest.java | 8 ++++---- 3 files changed, 7 insertions(+), 21 deletions(-) diff --git a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java index 4145ef8..835a0b8 100644 --- a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java +++ b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java @@ -73,7 +73,6 @@ protected Schema createSchema() { .build(); } - // Create a batch of records with incremental numeric field values. Total number of records is given by 'size'. protected Struct createRecord(Schema schema, int ibase, float fbase) { return new Struct(schema) .put("boolean", true) @@ -108,7 +107,8 @@ protected Struct createNewRecord(Schema newSchema) { .put("string", "def"); } - // Create a batch of records with incremental numeric field values. Total number of records is given by 'size'. + // Create a batch of records with incremental numeric field values. Total number of records is + // given by 'size'. protected List createRecordBatch(Schema schema, int size) { ArrayList records = new ArrayList<>(size); int ibase = 16; diff --git a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java index df97afb..ffa26fb 100644 --- a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java +++ b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java @@ -126,20 +126,6 @@ protected List createSinkRecords(int size, long startOffset) { * @return the list of records. */ protected List createSinkRecords(int size, long startOffset, Set partitions) { - /* - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - List sinkRecords = new ArrayList<>(); - for (TopicPartition tp : partitions) { - for (long offset = startOffset; offset < startOffset + size; ++offset) { - sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset)); - } - } - return sinkRecords; - */ - Schema schema = createSchema(); Struct record = createRecord(schema); List same = new ArrayList<>(); @@ -191,7 +177,7 @@ protected List createSinkRecordsNoVersion(int size, long startOffset return sinkRecords; } - protected List createSinkRecordsWithAlteringSchemas(int size, long startOffset) { + protected List createSinkRecordsWithAlternatingSchemas(int size, long startOffset) { String key = "key"; Schema schema = createSchema(); Struct record = createRecord(schema); diff --git a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java index c7352c6..ca80a7a 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java @@ -268,7 +268,7 @@ public void testProjectBackWard() throws Exception { partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - List sinkRecords = createSinkRecordsWithAlteringSchemas(7, 0); + List sinkRecords = createSinkRecordsWithAlternatingSchemas(7, 0); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -287,7 +287,7 @@ public void testProjectNone() throws Exception { partitioner = hdfsWriter.getPartitioner(); 
hdfsWriter.recover(TOPIC_PARTITION); - List sinkRecords = createSinkRecordsWithAlteringSchemas(7, 0); + List sinkRecords = createSinkRecordsWithAlternatingSchemas(7, 0); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -309,7 +309,7 @@ public void testProjectForward() throws Exception { hdfsWriter.recover(TOPIC_PARTITION); // By excluding the first element we get a list starting with record having the new schema. - List sinkRecords = createSinkRecordsWithAlteringSchemas(8, 0).subList(1, 8); + List sinkRecords = createSinkRecordsWithAlternatingSchemas(8, 0).subList(1, 8); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); @@ -330,7 +330,7 @@ public void testProjectNoVersion() throws Exception { hdfsWriter.recover(TOPIC_PARTITION); List sinkRecords = createSinkRecordsNoVersion(1, 0); - sinkRecords.addAll(createSinkRecordsWithAlteringSchemas(7, 0)); + sinkRecords.addAll(createSinkRecordsWithAlternatingSchemas(7, 0)); try { hdfsWriter.write(sinkRecords); From be14311b170ac9f64f8bf25bf212b7c76a14bf75 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Tue, 23 May 2017 22:55:38 -0700 Subject: [PATCH 42/62] Remove debugging statement from parquet test. --- .../connect/hdfs/parquet/HiveIntegrationParquetTest.java | 1 - 1 file changed, 1 deletion(-) diff --git a/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java b/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java index 5a7601e..561f298 100644 --- a/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java +++ b/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java @@ -277,7 +277,6 @@ public void testHiveIntegrationTimeBasedPartitionerParquet() throws Exception { String[] parts = HiveTestUtils.parseOutput(rows[i]); int j = 0; for (String expectedValue : expectedResults.get(i)) { - System.err.println("Exp: " + expectedValue + " Actual: " + parts[j]); assertEquals(expectedValue, parts[j++]); } } From 6970a123c8ea246b6290ca5891f95f0cba3a2e5c Mon Sep 17 00:00:00 2001 From: Ewen Cheslack-Postava Date: Fri, 26 May 2017 14:52:43 -0700 Subject: [PATCH 43/62] Fix incorrect licensing and webpage info. --- NOTICE | 4 +- licenses/LICENSE.gpl2.txt | 339 -------------------------------------- 2 files changed, 2 insertions(+), 341 deletions(-) delete mode 100644 licenses/LICENSE.gpl2.txt diff --git a/NOTICE b/NOTICE index 37aa9de..929fdfc 100644 --- a/NOTICE +++ b/NOTICE @@ -285,8 +285,8 @@ The following libraries are included in packaged versions of this project: * Pentaho aggdesigner * COPYRIGHT: Copyright 2006 - 2013 Pentaho Corporation - * LICENSE: licenses/LICENSE.gpl2.txt - * HOMEPAGE: https://github.com/pentaho/pentaho-aggdesigner + * LICENSE: licenses/LICENSE.apache2.txt + * HOMEPAGE: https://github.com/julianhyde/aggdesigner/tree/master/pentaho-aggdesigner-algorithm * SLF4J * COPYRIGHT: Copyright (c) 2004-2013 QOS.ch diff --git a/licenses/LICENSE.gpl2.txt b/licenses/LICENSE.gpl2.txt deleted file mode 100644 index d159169..0000000 --- a/licenses/LICENSE.gpl2.txt +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. 
By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). 
-Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. 
You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. 
Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. From 3592793b7b87debcd485f6bf19f71f24aece5fae Mon Sep 17 00:00:00 2001 From: Magesh Nandakumar Date: Tue, 6 Jun 2017 15:04:38 -0700 Subject: [PATCH 44/62] Upgrade avro to 1.8.0 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 31a74bc..dc243a9 100644 --- a/pom.xml +++ b/pom.xml @@ -55,7 +55,7 @@ 4.12 2.7.3 1.2.1 - 1.7.7 + 1.8.0 1.7.0 2.4 2.9.7 From c1d1f97509a7e06a02aaf6d42e2bc7da42b50f0c Mon Sep 17 00:00:00 2001 From: Ewen Cheslack-Postava Date: Tue, 6 Jun 2017 15:22:45 -0700 Subject: [PATCH 45/62] Add 3.2.2 changelog --- docs/changelog.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 3447d44..aa614e9 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -3,6 +3,12 @@ Changelog ========= +Version 3.2.2 +------------- + +* `PR-194 `_ - Fix HdfsSinkConnector to extend from SinkConnector instead of Connector. +* `PR-200 `_ - Fix incorrect licensing and webpage info. + Version 3.2.1 ------------- No changes From a1fd0c4175e053ed23586eb245d6c8198942d6dc Mon Sep 17 00:00:00 2001 From: Magesh Nandakumar Date: Wed, 7 Jun 2017 11:29:07 -0700 Subject: [PATCH 46/62] Upgrade avro to 1.8.2 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index dc243a9..65b741f 100644 --- a/pom.xml +++ b/pom.xml @@ -55,7 +55,7 @@ 4.12 2.7.3 1.2.1 - 1.8.0 + 1.8.2 1.7.0 2.4 2.9.7 From c464ef235ae2d5d726f35b740afce6eea222167b Mon Sep 17 00:00:00 2001 From: Magesh Nandakumar Date: Thu, 8 Jun 2017 12:59:14 -0700 Subject: [PATCH 47/62] Upgrade avro to 1.8.2 --- docs/hdfs_connector.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/hdfs_connector.rst b/docs/hdfs_connector.rst index aaf5c86..bf25c25 100644 --- a/docs/hdfs_connector.rst +++ b/docs/hdfs_connector.rst @@ -70,11 +70,11 @@ in HDFS:: You should see a file with name ``/topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro`` The file name is encoded as ``topic+kafkaPartition+startOffset+endOffset.format``. -You can use ``avro-tools-1.7.7.jar`` -(available in `Apache mirrors `_) +You can use ``avro-tools-1.8.2.jar`` +(available in `Apache mirrors `_) to extract the content of the file. Run ``avro-tools`` directly on Hadoop as:: - $ hadoop jar avro-tools-1.7.7.jar tojson \ + $ hadoop jar avro-tools-1.8.2.jar tojson \ hdfs:///topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro where "" is the HDFS name node hostname. 
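The ``topic+kafkaPartition+startOffset+endOffset.format`` encoding mentioned above can be sketched in a few lines of standalone Java. This is only an illustration, not the connector's own code (the tests in this series build expected names through ``FileUtils.committedFileName``); the 10-digit zero padding is an assumption taken from the example file name.

.. sourcecode:: java

    // Sketch of the committed-file naming scheme described above. The zero
    // padding width (10) is assumed from the example name
    // test_hdfs+0+0000000000+0000000002.avro.
    public class CommittedFileNameSketch {
        public static void main(String[] args) {
            String topic = "test_hdfs";
            int kafkaPartition = 0;
            long startOffset = 0L;
            long endOffset = 2L;        // offset of the last record in the file (inclusive)
            String extension = ".avro";
            String fileName = String.format("%s+%d+%010d+%010d%s",
                    topic, kafkaPartition, startOffset, endOffset, extension);
            System.out.println(fileName);  // prints test_hdfs+0+0000000000+0000000002.avro
        }
    }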
@@ -84,7 +84,7 @@ or, if you experience issues, first copy the avro file from HDFS to the local fi $ hadoop fs -copyToLocal /topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro \ /tmp/test_hdfs+0+0000000000+0000000002.avro - $ java -jar avro-tools-1.7.7.jar tojson /tmp/test_hdfs+0+0000000000+0000000002.avro + $ java -jar avro-tools-1.8.2.jar tojson /tmp/test_hdfs+0+0000000000+0000000002.avro You should see the following output:: From 437485a0788f427e59245c72f55cd005a67a63b1 Mon Sep 17 00:00:00 2001 From: dan norwood Date: Mon, 12 Jun 2017 11:22:18 -0700 Subject: [PATCH 48/62] jenkinsfile time --- Jenkinsfile | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 Jenkinsfile diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000..cb7565a --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,4 @@ +#!/usr/bin/env groovy +common { + slackChannel = '#connect-eng' +} From 53370794b64a0ffe7d1ee8bc89d3a74a26d019de Mon Sep 17 00:00:00 2001 From: Sriram Subramanian Date: Thu, 15 Jun 2017 14:22:15 -0700 Subject: [PATCH 49/62] Set Confluent to 3.2.2, Kafka to 0.10.2.1-cp2. - Automatic commit via: tbr apply-rc-tag --rc 2 --confluent-patch 2 --build temp-rc-settings -e prod PLATFORM --skip-projects security,confluent-cli --no-dry-run --- docs/conf.py | 2 +- pom.xml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index a63635a..dce850b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '3.2' # The full version, including alpha/beta/rc tags. -release = '3.2.2-SNAPSHOT' +release = '3.2.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pom.xml b/pom.xml index 2b2ffd1..2debb3f 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.2.2-SNAPSHOT + 3.2.2 kafka-connect-hdfs Confluent, Inc. @@ -50,8 +50,8 @@ - 3.2.2-SNAPSHOT - 0.10.2.2-SNAPSHOT + 3.2.2 + 0.10.2.1-cp2 4.12 2.7.3 1.2.1 From 28eb13ba6844ee6c9871ebd1472f42dbaa45cf09 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Wed, 21 Jun 2017 15:44:13 -0500 Subject: [PATCH 50/62] Add 3.3.0 changelog --- docs/changelog.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index aa614e9..f7d3295 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -3,6 +3,12 @@ Changelog ========= +Version 3.3.0 +------------- + +* `PR-187 `_ - CC-491: Consolidate and simplify unit tests of HDFS connector. +* `PR-205 `_ - Upgrade avro to 1.8.2. + Version 3.2.2 ------------- From 367da7109edb7e26a8b9726af2ad44858f255fd8 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Wed, 26 Jul 2017 23:13:41 -0700 Subject: [PATCH 51/62] Update quickstart to use Confluent CLI --- docs/hdfs_connector.rst | 109 ++++++++++++++++++++++++++++++++-------- 1 file changed, 88 insertions(+), 21 deletions(-) diff --git a/docs/hdfs_connector.rst b/docs/hdfs_connector.rst index bf25c25..ac6b4e4 100644 --- a/docs/hdfs_connector.rst +++ b/docs/hdfs_connector.rst @@ -20,9 +20,8 @@ Quickstart In this Quickstart, we use the HDFS connector to export data produced by the Avro console producer to HDFS. -Start Zookeeper, Kafka and SchemaRegistry if you haven't done so. The instructions on how to start -these services are available at the Confluent Platform QuickStart. You also need to have Hadoop -running locally or remotely and make sure that you know the HDFS url. 
For Hive integration, you +Before you start the Confluent services, make sure Hadoop is +running locally or remotely and that you know the HDFS url. For Hive integration, you need to have Hive installed and to know the metastore thrift uri. This Quickstart assumes that you started the required services with the default configurations and @@ -39,12 +38,41 @@ Also, this Quickstart assumes that security is not configured for HDFS and Hive please make the necessary configurations change following `Secure HDFS and Hive Metastore`_ section. -First, start the Avro console producer:: +First, start all the necessary services using Confluent CLI. + +.. tip:: + + If not already in your PATH, add Confluent's ``bin`` directory by running: ``export PATH=/bin:$PATH`` + +.. sourcecode:: bash + + $ confluent start + +Every service will start in order, printing a message with its status: + +.. sourcecode:: bash + + Starting zookeeper + zookeeper is [UP] + Starting kafka + kafka is [UP] + Starting schema-registry + schema-registry is [UP] + Starting kafka-rest + kafka-rest is [UP] + Starting connect + connect is [UP] + +Next, start the Avro console producer to import a few records to Kafka: + +.. sourcecode:: bash $ ./bin/kafka-avro-console-producer --broker-list localhost:9092 --topic test_hdfs \ --property value.schema='{"type":"record","name":"myrecord","fields":[{"name":"f1","type":"string"}]}' -Then in the console producer, type in:: +Then in the console producer, type in: + +.. sourcecode:: bash {"f1": "value1"} {"f1": "value2"} @@ -54,16 +82,37 @@ The three records entered are published to the Kafka topic ``test_hdfs`` in Avro Before starting the connector, please make sure that the configurations in ``etc/kafka-connect-hdfs/quickstart-hdfs.properties`` are properly set to your configurations of -Hadoop, e.g. ``hdfs.url`` points to the proper HDFS and using FQDN in the host. Then run the -following command to start Kafka connect with the HDFS connector:: +Hadoop, e.g. ``hdfs.url`` points to the proper HDFS and using FQDN in the host. Then start connector by loading its +configuration with the following command: + +.. sourcecode:: bash + + $ confluent load hdfs-sink -d etc/kafka-connect-hdfs/quickstart-hdfs.properties + { + "name": "hdfs-sink", + "config": { + "connector.class": "io.confluent.connect.hdfs.HdfsSinkConnector", + "tasks.max": "1", + "topics": "test_hdfs", + "hdfs.url": "hdfs://localhost:9000", + "flush.size": "3", + "name": "hdfs-sink" + }, + "tasks": [] + } + +To check that the connector started successfully open the Connect worker's log by running: +to HDFS. +.. sourcecode:: bash - $ ./bin/connect-standalone etc/schema-registry/connect-avro-standalone.properties \ - etc/kafka-connect-hdfs/quickstart-hdfs.properties + $ confluent log connect -You should see that the process starts up and logs some messages, and then exports data from Kafka -to HDFS. Once the connector finishes ingesting data to HDFS, check that the data is available -in HDFS:: +By navigating at the end of the log, you should see that the connector starts, logs a few messages, and then exports +data from Kafka to HDFS. +Once the connector finishes ingesting data to HDFS, check that the data is available in HDFS: + +.. sourcecode:: bash $ hadoop fs -ls /topics/test_hdfs/partition=0 @@ -72,21 +121,27 @@ The file name is encoded as ``topic+kafkaPartition+startOffset+endOffset.format` You can use ``avro-tools-1.8.2.jar`` (available in `Apache mirrors `_) -to extract the content of the file. 
Run ``avro-tools`` directly on Hadoop as:: +to extract the content of the file. Run ``avro-tools`` directly on Hadoop as: + +.. sourcecode:: bash $ hadoop jar avro-tools-1.8.2.jar tojson \ hdfs:///topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro where "" is the HDFS name node hostname. -or, if you experience issues, first copy the avro file from HDFS to the local filesystem and try again with java:: +or, if you experience issues, first copy the avro file from HDFS to the local filesystem and try again with java: + +.. sourcecode:: bash $ hadoop fs -copyToLocal /topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro \ /tmp/test_hdfs+0+0000000000+0000000002.avro $ java -jar avro-tools-1.8.2.jar tojson /tmp/test_hdfs+0+0000000000+0000000002.avro -You should see the following output:: +You should see the following output: + +.. sourcecode:: bash {"f1":"value1"} {"f1":"value2"} @@ -95,13 +150,17 @@ You should see the following output:: .. note:: If you want to run the Quickstart with Hive integration, before starting the connector, you need to add the following configurations to - ``etc/kafka-connect-hdfs/quickstart-hdfs.properties``:: + ``etc/kafka-connect-hdfs/quickstart-hdfs.properties``: + +.. sourcecode:: bash hive.integration=true hive.metastore.uris=thrift uri to your Hive metastore schema.compatibility=BACKWARD - After the connector finishes ingesting data to HDFS, you can use Hive to check the data:: + After the connector finishes ingesting data to HDFS, you can use Hive to check the data: + +.. sourcecode:: sql $hive>SELECT * FROM test_hdfs; @@ -146,7 +205,9 @@ description of the available configuration options. Example ~~~~~~~ -Here is the content of ``etc/kafka-connect-hdfs/quickstart-hdfs.properties``:: +Here is the content of ``etc/kafka-connect-hdfs/quickstart-hdfs.properties``: + +.. sourcecode:: bash name=hdfs-sink connector.class=io.confluent.connect.hdfs.HdfsSinkConnector @@ -168,7 +229,9 @@ Format and Partitioner ~~~~~~~~~~~~~~~~~~~~~~ You need to specify the ``format.class`` and ``partitioner.class`` if you want to write other formats to HDFS or use other partitioners. The following example configurations demonstrates how to -write Parquet format and use hourly partitioner:: +write Parquet format and use hourly partitioner: + +.. sourcecode:: bash format.class=io.confluent.connect.hdfs.parquet.ParquetFormat partitioner.class=io.confluent.connect.hdfs.partitioner.HourlyPartitioner @@ -179,7 +242,9 @@ write Parquet format and use hourly partitioner:: Hive Integration ~~~~~~~~~~~~~~~~ At minimum, you need to specify ``hive.integration``, ``hive.metastore.uris`` and -``schema.compatibility`` when integrating Hive. Here is an example configuration:: +``schema.compatibility`` when integrating Hive. Here is an example configuration: + +.. sourcecode:: bash hive.integration=true hive.metastore.uris=thrift://localhost:9083 # FQDN for the host part @@ -205,7 +270,9 @@ latest Hive table schema. Please find more information on schema compatibility i Secure HDFS and Hive Metastore ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To work with secure HDFS and Hive metastore, you need to specify ``hdfs.authentication.kerberos``, -``connect.hdfs.principal``, ``connect.keytab``, ``hdfs.namenode.principal``:: +``connect.hdfs.principal``, ``connect.keytab``, ``hdfs.namenode.principal``: + +.. 
sourcecode:: bash hdfs.authentication.kerberos=true connect.hdfs.principal=connect-hdfs/_HOST@YOUR-REALM.COM From 2e8694d347073a19f611df2bbf9522a651f95313 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Thu, 27 Jul 2017 10:33:47 -0700 Subject: [PATCH 52/62] Add stop instructions at the end of the quickstart. --- docs/hdfs_connector.rst | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/docs/hdfs_connector.rst b/docs/hdfs_connector.rst index ac6b4e4..3de2528 100644 --- a/docs/hdfs_connector.rst +++ b/docs/hdfs_connector.rst @@ -147,20 +147,48 @@ You should see the following output: {"f1":"value2"} {"f1":"value3"} +Finally, stop the Connect worker as well as all the rest of the Confluent services by running: -.. note:: If you want to run the Quickstart with Hive integration, before starting the connector, - you need to add the following configurations to - ``etc/kafka-connect-hdfs/quickstart-hdfs.properties``: +.. sourcecode:: bash + + $ confluent stop + Stopping connect + connect is [DOWN] + Stopping kafka-rest + kafka-rest is [DOWN] + Stopping schema-registry + schema-registry is [DOWN] + Stopping kafka + kafka is [DOWN] + Stopping zookeeper + zookeeper is [DOWN] + +or stop all the services and additionally wipe out any data generated during this quickstart by running: .. sourcecode:: bash + $ confluent destroy + Stopping connect + connect is [DOWN] + Stopping kafka-rest + kafka-rest is [DOWN] + Stopping schema-registry + schema-registry is [DOWN] + Stopping kafka + kafka is [DOWN] + Stopping zookeeper + zookeeper is [DOWN] + Deleting: /tmp/confluent.w1CpYsaI + +.. note:: If you want to run the Quickstart with Hive integration, before starting the connector, + you need to add the following configurations to + ``etc/kafka-connect-hdfs/quickstart-hdfs.properties``:: + hive.integration=true hive.metastore.uris=thrift uri to your Hive metastore schema.compatibility=BACKWARD - After the connector finishes ingesting data to HDFS, you can use Hive to check the data: - -.. sourcecode:: sql + After the connector finishes ingesting data to HDFS, you can use Hive to check the data:: $hive>SELECT * FROM test_hdfs; From e2d9b26e9485b3bd1266a4b199e56662ca3e9e50 Mon Sep 17 00:00:00 2001 From: Konstantine Karantasis Date: Thu, 27 Jul 2017 10:39:28 -0700 Subject: [PATCH 53/62] Improve wording around 'confluent log connect' --- docs/hdfs_connector.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/hdfs_connector.rst b/docs/hdfs_connector.rst index 3de2528..8d42fe2 100644 --- a/docs/hdfs_connector.rst +++ b/docs/hdfs_connector.rst @@ -101,14 +101,13 @@ configuration with the following command: "tasks": [] } -To check that the connector started successfully open the Connect worker's log by running: -to HDFS. +To check that the connector started successfully view the Connect worker's log by running: .. sourcecode:: bash $ confluent log connect -By navigating at the end of the log, you should see that the connector starts, logs a few messages, and then exports +Towards the end of the log you should see that the connector starts, logs a few messages, and then exports data from Kafka to HDFS. Once the connector finishes ingesting data to HDFS, check that the data is available in HDFS: From bb6996d05efcd366672b26b4452f145ffcb0c372 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Thu, 27 Jul 2017 11:15:42 -0700 Subject: [PATCH 54/62] Set Confluent to 3.3.0, Kafka to 0.11.0.0-cp1. 
- Automatic commit via: tbr apply-rc-tag --rc 4 --confluent-patch 1 --build release -e prod CAMUS PLATFORM --no-dry-run --- docs/conf.py | 2 +- pom.xml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index d4bc214..ba62f2f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '3.3' # The full version, including alpha/beta/rc tags. -release = '3.3.0-SNAPSHOT' +release = '3.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pom.xml b/pom.xml index 65b741f..3a91d4c 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.3.0-SNAPSHOT + 3.3.0 kafka-connect-hdfs Confluent, Inc. @@ -50,8 +50,8 @@ - 3.3.0-SNAPSHOT - 0.11.0.0-SNAPSHOT + 3.3.0 + 0.11.0.0-cp1 4.12 2.7.3 1.2.1 From 6af158f9dac55c561b86b936a9fa8f12e3a2b4a0 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Fri, 28 Jul 2017 13:35:53 -0700 Subject: [PATCH 55/62] Bump version to 3.3.1-SNAPSHOT --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 65b741f..bffb8c9 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT kafka-connect-hdfs Confluent, Inc. From 4a7803f4f65d7866bc27e526360c3dceab82bdc3 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Fri, 28 Jul 2017 13:43:55 -0700 Subject: [PATCH 56/62] Bump Confluent to 3.3.1-SNAPSHOT --- docs/conf.py | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index d4bc214..c8cc250 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '3.3' # The full version, including alpha/beta/rc tags. -release = '3.3.0-SNAPSHOT' +release = '3.3.1-SNAPSHOT' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pom.xml b/pom.xml index bffb8c9..6a04514 100644 --- a/pom.xml +++ b/pom.xml @@ -50,7 +50,7 @@ - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 0.11.0.0-SNAPSHOT 4.12 2.7.3 From 326bdcd10466ae4cb4c409e269824ded1dfba1a5 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Fri, 28 Jul 2017 15:45:36 -0700 Subject: [PATCH 57/62] Bump Kafka to 0.11.0.1-SNAPSHOT --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 6a04514..db70a3b 100644 --- a/pom.xml +++ b/pom.xml @@ -51,7 +51,7 @@ 3.3.1-SNAPSHOT - 0.11.0.0-SNAPSHOT + 0.11.0.1-SNAPSHOT 4.12 2.7.3 1.2.1 From 44e0d85bb5cb93e0659d76af052a1d790111d30f Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Mon, 31 Jul 2017 16:03:10 -0700 Subject: [PATCH 58/62] Bump version to 3.2.3-SNAPSHOT --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 2b2ffd1..9d07d64 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.2.2-SNAPSHOT + 3.2.3-SNAPSHOT kafka-connect-hdfs Confluent, Inc. From fb41464288ec8cacae90c925b3ca24659fa0af28 Mon Sep 17 00:00:00 2001 From: Alex Ayars Date: Mon, 31 Jul 2017 16:07:51 -0700 Subject: [PATCH 59/62] Bump Confluent to 3.2.3-SNAPSHOT --- docs/conf.py | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index a63635a..8f3d244 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. 
version = '3.2' # The full version, including alpha/beta/rc tags. -release = '3.2.2-SNAPSHOT' +release = '3.2.3-SNAPSHOT' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pom.xml b/pom.xml index 9d07d64..7fb0228 100644 --- a/pom.xml +++ b/pom.xml @@ -50,7 +50,7 @@ - 3.2.2-SNAPSHOT + 3.2.3-SNAPSHOT 0.10.2.2-SNAPSHOT 4.12 2.7.3 From 4ec9fe2afa1ad281d6fe886de4e3055a06b924a7 Mon Sep 17 00:00:00 2001 From: Confluent Jenkins Bot Date: Thu, 17 Aug 2017 14:32:31 -0700 Subject: [PATCH 60/62] Set Confluent to 3.3.0-hotfix.1, Kafka to 0.11.0.0-hotfix.1. --- docs/conf.py | 2 +- pom.xml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index c8cc250..2604c1a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '3.3' # The full version, including alpha/beta/rc tags. -release = '3.3.1-SNAPSHOT' +release = '3.3.0-hotfix.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pom.xml b/pom.xml index db70a3b..28a57a7 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.confluent kafka-connect-hdfs jar - 3.3.1-SNAPSHOT + 3.3.0-hotfix.1 kafka-connect-hdfs Confluent, Inc. @@ -50,8 +50,8 @@ - 3.3.1-SNAPSHOT - 0.11.0.1-SNAPSHOT + 3.3.0-hotfix.1 + 0.11.0.0-hotfix.1 4.12 2.7.3 1.2.1 From e1041b51f1a23a67c6c1b16deaca6819b6347c7d Mon Sep 17 00:00:00 2001 From: Lewis Dawson Date: Tue, 19 Sep 2017 23:36:50 -0700 Subject: [PATCH 61/62] Added ability to connector to convert CSV data to Parquet before uploading to S3. --- pom.xml | 4 +- .../streamx/format/csv/CsvParquetFormat.java | 35 ++++ .../csv/CsvParquetRecordWriterProvider.java | 163 ++++++++++++++++++ .../qubole/streamx/s3/S3SinkConnector.java | 6 +- .../streamx/s3/S3SinkConnectorConfig.java | 4 + .../connect/hdfs/HdfsSinkConnectorConfig.java | 2 +- .../connect/hdfs/avro/DataWriterAvroTest.java | 4 +- 7 files changed, 212 insertions(+), 6 deletions(-) create mode 100644 src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java create mode 100644 src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java diff --git a/pom.xml b/pom.xml index 050e7eb..db8396a 100644 --- a/pom.xml +++ b/pom.xml @@ -50,8 +50,8 @@ - 3.3.0-hotfix.1 - 0.11.0.0-hotfix.1 + 3.3.0 + 0.11.0.0 4.12 2.7.3 1.2.1 diff --git a/src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java b/src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java new file mode 100644 index 0000000..5824803 --- /dev/null +++ b/src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java @@ -0,0 +1,35 @@ +package com.qubole.streamx.format.csv; + +import io.confluent.connect.avro.AvroData; +import io.confluent.connect.hdfs.Format; +import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; +import io.confluent.connect.hdfs.RecordWriterProvider; +import io.confluent.connect.hdfs.SchemaFileReader; +import io.confluent.connect.hdfs.hive.HiveMetaStore; +import io.confluent.connect.hdfs.hive.HiveUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Takes CSV data from a {@link org.apache.kafka.connect.storage.StringConverter} and converts it to Parquet format + * prior to S3 upload. 
+ */ +public class CsvParquetFormat implements Format { + + private static final Logger log = LoggerFactory.getLogger(CsvParquetFormat.class); + + @Override + public RecordWriterProvider getRecordWriterProvider() { + return new CsvParquetRecordWriterProvider(); + } + + @Override + public SchemaFileReader getSchemaFileReader(AvroData avroData) { + return null; + } + + @Override + public HiveUtil getHiveUtil(HdfsSinkConnectorConfig config, AvroData avroData, HiveMetaStore hiveMetaStore) { + return null; + } +} diff --git a/src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java b/src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java new file mode 100644 index 0000000..270d4e2 --- /dev/null +++ b/src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java @@ -0,0 +1,163 @@ +package com.qubole.streamx.format.csv; + +import com.qubole.streamx.s3.S3SinkConnector; +import com.qubole.streamx.s3.S3SinkConnectorConfig; +import io.confluent.connect.avro.AvroData; +import io.confluent.connect.hdfs.RecordWriter; +import io.confluent.connect.hdfs.RecordWriterProvider; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.kafka.connect.sink.SinkRecord; +import org.apache.parquet.column.ColumnDescriptor; +import org.apache.parquet.hadoop.ParquetOutputFormat; +import org.apache.parquet.hadoop.api.WriteSupport; +import org.apache.parquet.hadoop.metadata.CompressionCodecName; +import org.apache.parquet.io.api.Binary; +import org.apache.parquet.io.api.RecordConsumer; +import org.apache.parquet.schema.MessageType; +import org.apache.parquet.schema.MessageTypeParser; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; + +/** + * Needs to be used in conjunction with the {@link org.apache.kafka.connect.storage.StringConverter}, which is set via the + * config/connect-*.properties key.converter and value.converter properties. 
+ */ +public class CsvParquetRecordWriterProvider implements RecordWriterProvider { + + private static final Logger log = LoggerFactory.getLogger(CsvParquetRecordWriterProvider.class); + + @Override + public String getExtension() { + return ".snappy.parquet"; + } + + @Override + public RecordWriter getRecordWriter(Configuration conf, String fileName, SinkRecord record, AvroData avroData) throws IOException { + Path path = new Path(fileName); + + String schemaString = S3SinkConnector.getConfigString(S3SinkConnectorConfig.PARQUET_SCHEMA_CONFIG); + if (schemaString == null) { + throw new IllegalArgumentException(String.format("A Parquet schema must be specified under property %s!", + S3SinkConnectorConfig.PARQUET_SCHEMA_CONFIG)); + } + + MessageType schema = MessageTypeParser.parseMessageType(schemaString); + log.debug("Schema String = {}", schema.toString()); + + final SourceParquetOutputFormat sourceParquetOutputFormat = new SourceParquetOutputFormat(schema); + final org.apache.hadoop.mapreduce.RecordWriter recordWriter; + try { + recordWriter = sourceParquetOutputFormat.getRecordWriter(conf, path, CompressionCodecName.SNAPPY); + } catch (InterruptedException ie) { + throw new IOException(ie); + } + return new RecordWriter() { + @Override + public void write(SinkRecord sinkRecord) throws IOException { + try { + log.trace("SinkRecord = {}", sinkRecord.value()); + recordWriter.write(null, sinkRecord.value().toString()); + } catch (InterruptedException ie) { + throw new IOException(ie); + } + } + + @Override + public void close() throws IOException { + try { + recordWriter.close(null); + } catch (InterruptedException ie) { + throw new IOException(ie); + } + } + }; + } + + /** + * Used to parse and write each CSV record to the Parquet {@link RecordConsumer}. + */ + private static final class CsvParquetWriteSupport extends WriteSupport { + + private static final Logger log = LoggerFactory.getLogger(CsvParquetWriteSupport.class); + + private final MessageType schema; + + private RecordConsumer recordConsumer; + + private List columns; + + // TODO: specify the csv splitter + public CsvParquetWriteSupport(MessageType schema) { + this.schema = schema; + this.columns = schema.getColumns(); + } + + @Override + public WriteContext init(Configuration configuration) { + return new WriteContext(schema, new HashMap()); + } + + @Override + public void prepareForWrite(RecordConsumer recordConsumer) { + this.recordConsumer = recordConsumer; + } + + @Override + public void write(String record) { + String[] csvRecords = record.split(","); + + if (csvRecords.length > 0) { + recordConsumer.startMessage(); + + for (int i = 0; i < columns.size(); i++) { + ColumnDescriptor columnDescriptor = columns.get(i); + + // If there aren't enough entries in the csvRecords, write an empty string + String csvRecord = (csvRecords.length < i) ? 
"" : csvRecords[i]; + + recordConsumer.startField(columns.get(i).getPath()[0], i); + switch (columnDescriptor.getType()) { + case INT32: + recordConsumer.addInteger(Integer.parseInt(csvRecord)); + break; + case INT64: + case INT96: + recordConsumer.addLong(Long.parseLong(csvRecord)); + break; + case BINARY: + case FIXED_LEN_BYTE_ARRAY: + recordConsumer.addBinary(Binary.fromString(csvRecord)); + break; + case BOOLEAN: + recordConsumer.addBoolean(Boolean.parseBoolean(csvRecord)); + break; + case FLOAT: + recordConsumer.addFloat(Float.parseFloat(csvRecord)); + break; + case DOUBLE: + recordConsumer.addDouble(Double.parseDouble(csvRecord)); + break; + default: + throw new UnsupportedOperationException( + String.format("Unsupported record conversion for type '%s'!", columnDescriptor.getType().name())); + } + + recordConsumer.endField(columns.get(i).getPath()[0], i); + } + recordConsumer.endMessage(); + } + } + } + + private static final class SourceParquetOutputFormat extends ParquetOutputFormat { + private SourceParquetOutputFormat(MessageType schema) { + super(new CsvParquetWriteSupport(schema)); + } + } + +} diff --git a/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java b/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java index c7c642b..8328076 100644 --- a/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java +++ b/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java @@ -37,7 +37,7 @@ public class S3SinkConnector extends Connector { private static final Logger log = LoggerFactory.getLogger(S3SinkConnector.class); private Map configProperties; - private S3SinkConnectorConfig config; + private static S3SinkConnectorConfig config; @Override public String version() { @@ -78,4 +78,8 @@ public void stop() throws ConnectException { public ConfigDef config() { return S3SinkConnectorConfig.getConfig(); } + + public static String getConfigString(String key) { + return config.getString(key); + } } diff --git a/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java b/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java index 8e5aac6..17f8caa 100644 --- a/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java +++ b/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java @@ -58,6 +58,9 @@ public class S3SinkConnectorConfig extends HdfsSinkConnectorConfig { public static final String NAME_DEFAULT = ""; private static final String NAME_DISPLAY = "Connector Name"; + public static final String PARQUET_SCHEMA_CONFIG = "s3.parquet.schema"; + private static final String PARQUET_SCHEMA_DOC = "The schema used to write a Parquet file"; + private static final String PARQUET_SCHEMA_DISPLAY = "Parquet Schema"; static { config.define(S3_URL_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, S3_URL_DOC, S3_GROUP, 1, ConfigDef.Width.MEDIUM, S3_URL_DISPLAY); @@ -66,6 +69,7 @@ public class S3SinkConnectorConfig extends HdfsSinkConnectorConfig { config.define(DB_USER_CONFIG, ConfigDef.Type.STRING, DB_USER_DEFAULT, ConfigDef.Importance.LOW, DB_USER_DOC, WAL_GROUP, 1, ConfigDef.Width.MEDIUM, DB_USER_DISPLAY); config.define(DB_PASSWORD_CONFIG, ConfigDef.Type.STRING, DB_PASSWORD_DEFAULT, ConfigDef.Importance.LOW, DB_PASSWORD_DOC, WAL_GROUP, 1, ConfigDef.Width.MEDIUM, DB_PASSWORD_DISPLAY); config.define(NAME_CONFIG, ConfigDef.Type.STRING, NAME_DEFAULT, ConfigDef.Importance.HIGH, NAME_DOC, S3_GROUP,1, ConfigDef.Width.MEDIUM, NAME_DISPLAY); + config.define(PARQUET_SCHEMA_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, PARQUET_SCHEMA_DOC, S3_GROUP, 1, 
ConfigDef.Width.LONG, PARQUET_SCHEMA_DISPLAY); } diff --git a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java index 620c458..f06a7df 100644 --- a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java +++ b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java @@ -16,7 +16,7 @@ import com.qubole.streamx.s3.S3Storage; import org.apache.hadoop.conf.Configuration; -import org.apache.kafka.clients.producer.internals.DefaultPartitioner; +// import org.apache.kafka.clients.producer.internals.DefaultPartitioner; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; diff --git a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java index b095abf..2040abc 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java @@ -73,8 +73,8 @@ public void testRecovery() throws Exception { @SuppressWarnings("unchecked") Class storageClass = (Class) Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG)); - //Storage storage = StorageFactory.createStorage(storageClass, connectorConfig, conf, url); - Storage storage = StorageFactory.createStorage(storageClass, conf, url); + Storage storage = StorageFactory.createStorage(storageClass, connectorConfig, conf, url); + // Storage storage = StorageFactory.createStorage(storageClass, conf, url); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); partitioner = hdfsWriter.getPartitioner(); From 03eb2452089f1fb997c63888865c9214f2c97485 Mon Sep 17 00:00:00 2001 From: Lewis Dawson Date: Tue, 19 Sep 2017 23:36:50 -0700 Subject: [PATCH 62/62] Added ability to connector to convert CSV data to Parquet before uploading to S3. 
--- pom.xml | 4 +- .../streamx/format/csv/CsvParquetFormat.java | 35 ++++ .../csv/CsvParquetRecordWriterProvider.java | 163 ++++++++++++++++++ .../qubole/streamx/s3/S3SinkConnector.java | 6 +- .../streamx/s3/S3SinkConnectorConfig.java | 4 + .../connect/hdfs/HdfsSinkConnectorConfig.java | 2 +- .../connect/hdfs/avro/DataWriterAvroTest.java | 4 +- 7 files changed, 212 insertions(+), 6 deletions(-) create mode 100644 src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java create mode 100644 src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java diff --git a/pom.xml b/pom.xml index 050e7eb..db8396a 100644 --- a/pom.xml +++ b/pom.xml @@ -50,8 +50,8 @@ - 3.3.0-hotfix.1 - 0.11.0.0-hotfix.1 + 3.3.0 + 0.11.0.0 4.12 2.7.3 1.2.1 diff --git a/src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java b/src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java new file mode 100644 index 0000000..5824803 --- /dev/null +++ b/src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java @@ -0,0 +1,35 @@ +package com.qubole.streamx.format.csv; + +import io.confluent.connect.avro.AvroData; +import io.confluent.connect.hdfs.Format; +import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; +import io.confluent.connect.hdfs.RecordWriterProvider; +import io.confluent.connect.hdfs.SchemaFileReader; +import io.confluent.connect.hdfs.hive.HiveMetaStore; +import io.confluent.connect.hdfs.hive.HiveUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Takes CSV data from a {@link org.apache.kafka.connect.storage.StringConverter} and converts it to Parquet format + * prior to S3 upload. + */ +public class CsvParquetFormat implements Format { + + private static final Logger log = LoggerFactory.getLogger(CsvParquetFormat.class); + + @Override + public RecordWriterProvider getRecordWriterProvider() { + return new CsvParquetRecordWriterProvider(); + } + + @Override + public SchemaFileReader getSchemaFileReader(AvroData avroData) { + return null; + } + + @Override + public HiveUtil getHiveUtil(HdfsSinkConnectorConfig config, AvroData avroData, HiveMetaStore hiveMetaStore) { + return null; + } +} diff --git a/src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java b/src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java new file mode 100644 index 0000000..270d4e2 --- /dev/null +++ b/src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java @@ -0,0 +1,163 @@ +package com.qubole.streamx.format.csv; + +import com.qubole.streamx.s3.S3SinkConnector; +import com.qubole.streamx.s3.S3SinkConnectorConfig; +import io.confluent.connect.avro.AvroData; +import io.confluent.connect.hdfs.RecordWriter; +import io.confluent.connect.hdfs.RecordWriterProvider; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.kafka.connect.sink.SinkRecord; +import org.apache.parquet.column.ColumnDescriptor; +import org.apache.parquet.hadoop.ParquetOutputFormat; +import org.apache.parquet.hadoop.api.WriteSupport; +import org.apache.parquet.hadoop.metadata.CompressionCodecName; +import org.apache.parquet.io.api.Binary; +import org.apache.parquet.io.api.RecordConsumer; +import org.apache.parquet.schema.MessageType; +import org.apache.parquet.schema.MessageTypeParser; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; + +/** + * Needs to be used in conjunction with the {@link 
org.apache.kafka.connect.storage.StringConverter}, which is set via the + * config/connect-*.properties key.converter and value.converter properties. + */ +public class CsvParquetRecordWriterProvider implements RecordWriterProvider { + + private static final Logger log = LoggerFactory.getLogger(CsvParquetRecordWriterProvider.class); + + @Override + public String getExtension() { + return ".snappy.parquet"; + } + + @Override + public RecordWriter getRecordWriter(Configuration conf, String fileName, SinkRecord record, AvroData avroData) throws IOException { + Path path = new Path(fileName); + + String schemaString = S3SinkConnector.getConfigString(S3SinkConnectorConfig.PARQUET_SCHEMA_CONFIG); + if (schemaString == null) { + throw new IllegalArgumentException(String.format("A Parquet schema must be specified under property %s!", + S3SinkConnectorConfig.PARQUET_SCHEMA_CONFIG)); + } + + MessageType schema = MessageTypeParser.parseMessageType(schemaString); + log.debug("Schema String = {}", schema.toString()); + + final SourceParquetOutputFormat sourceParquetOutputFormat = new SourceParquetOutputFormat(schema); + final org.apache.hadoop.mapreduce.RecordWriter recordWriter; + try { + recordWriter = sourceParquetOutputFormat.getRecordWriter(conf, path, CompressionCodecName.SNAPPY); + } catch (InterruptedException ie) { + throw new IOException(ie); + } + return new RecordWriter() { + @Override + public void write(SinkRecord sinkRecord) throws IOException { + try { + log.trace("SinkRecord = {}", sinkRecord.value()); + recordWriter.write(null, sinkRecord.value().toString()); + } catch (InterruptedException ie) { + throw new IOException(ie); + } + } + + @Override + public void close() throws IOException { + try { + recordWriter.close(null); + } catch (InterruptedException ie) { + throw new IOException(ie); + } + } + }; + } + + /** + * Used to parse and write each CSV record to the Parquet {@link RecordConsumer}. + */ + private static final class CsvParquetWriteSupport extends WriteSupport { + + private static final Logger log = LoggerFactory.getLogger(CsvParquetWriteSupport.class); + + private final MessageType schema; + + private RecordConsumer recordConsumer; + + private List columns; + + // TODO: specify the csv splitter + public CsvParquetWriteSupport(MessageType schema) { + this.schema = schema; + this.columns = schema.getColumns(); + } + + @Override + public WriteContext init(Configuration configuration) { + return new WriteContext(schema, new HashMap()); + } + + @Override + public void prepareForWrite(RecordConsumer recordConsumer) { + this.recordConsumer = recordConsumer; + } + + @Override + public void write(String record) { + String[] csvRecords = record.split(","); + + if (csvRecords.length > 0) { + recordConsumer.startMessage(); + + for (int i = 0; i < columns.size(); i++) { + ColumnDescriptor columnDescriptor = columns.get(i); + + // If there aren't enough entries in the csvRecords, write an empty string + String csvRecord = (csvRecords.length < i) ? 
"" : csvRecords[i]; + + recordConsumer.startField(columns.get(i).getPath()[0], i); + switch (columnDescriptor.getType()) { + case INT32: + recordConsumer.addInteger(Integer.parseInt(csvRecord)); + break; + case INT64: + case INT96: + recordConsumer.addLong(Long.parseLong(csvRecord)); + break; + case BINARY: + case FIXED_LEN_BYTE_ARRAY: + recordConsumer.addBinary(Binary.fromString(csvRecord)); + break; + case BOOLEAN: + recordConsumer.addBoolean(Boolean.parseBoolean(csvRecord)); + break; + case FLOAT: + recordConsumer.addFloat(Float.parseFloat(csvRecord)); + break; + case DOUBLE: + recordConsumer.addDouble(Double.parseDouble(csvRecord)); + break; + default: + throw new UnsupportedOperationException( + String.format("Unsupported record conversion for type '%s'!", columnDescriptor.getType().name())); + } + + recordConsumer.endField(columns.get(i).getPath()[0], i); + } + recordConsumer.endMessage(); + } + } + } + + private static final class SourceParquetOutputFormat extends ParquetOutputFormat { + private SourceParquetOutputFormat(MessageType schema) { + super(new CsvParquetWriteSupport(schema)); + } + } + +} diff --git a/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java b/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java index c7c642b..8328076 100644 --- a/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java +++ b/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java @@ -37,7 +37,7 @@ public class S3SinkConnector extends Connector { private static final Logger log = LoggerFactory.getLogger(S3SinkConnector.class); private Map configProperties; - private S3SinkConnectorConfig config; + private static S3SinkConnectorConfig config; @Override public String version() { @@ -78,4 +78,8 @@ public void stop() throws ConnectException { public ConfigDef config() { return S3SinkConnectorConfig.getConfig(); } + + public static String getConfigString(String key) { + return config.getString(key); + } } diff --git a/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java b/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java index 8e5aac6..17f8caa 100644 --- a/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java +++ b/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java @@ -58,6 +58,9 @@ public class S3SinkConnectorConfig extends HdfsSinkConnectorConfig { public static final String NAME_DEFAULT = ""; private static final String NAME_DISPLAY = "Connector Name"; + public static final String PARQUET_SCHEMA_CONFIG = "s3.parquet.schema"; + private static final String PARQUET_SCHEMA_DOC = "The schema used to write a Parquet file"; + private static final String PARQUET_SCHEMA_DISPLAY = "Parquet Schema"; static { config.define(S3_URL_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, S3_URL_DOC, S3_GROUP, 1, ConfigDef.Width.MEDIUM, S3_URL_DISPLAY); @@ -66,6 +69,7 @@ public class S3SinkConnectorConfig extends HdfsSinkConnectorConfig { config.define(DB_USER_CONFIG, ConfigDef.Type.STRING, DB_USER_DEFAULT, ConfigDef.Importance.LOW, DB_USER_DOC, WAL_GROUP, 1, ConfigDef.Width.MEDIUM, DB_USER_DISPLAY); config.define(DB_PASSWORD_CONFIG, ConfigDef.Type.STRING, DB_PASSWORD_DEFAULT, ConfigDef.Importance.LOW, DB_PASSWORD_DOC, WAL_GROUP, 1, ConfigDef.Width.MEDIUM, DB_PASSWORD_DISPLAY); config.define(NAME_CONFIG, ConfigDef.Type.STRING, NAME_DEFAULT, ConfigDef.Importance.HIGH, NAME_DOC, S3_GROUP,1, ConfigDef.Width.MEDIUM, NAME_DISPLAY); + config.define(PARQUET_SCHEMA_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, PARQUET_SCHEMA_DOC, S3_GROUP, 1, 
ConfigDef.Width.LONG, PARQUET_SCHEMA_DISPLAY); } diff --git a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java index 620c458..f06a7df 100644 --- a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java +++ b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java @@ -16,7 +16,7 @@ import com.qubole.streamx.s3.S3Storage; import org.apache.hadoop.conf.Configuration; -import org.apache.kafka.clients.producer.internals.DefaultPartitioner; +// import org.apache.kafka.clients.producer.internals.DefaultPartitioner; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; diff --git a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java index b095abf..2040abc 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java @@ -73,8 +73,8 @@ public void testRecovery() throws Exception { @SuppressWarnings("unchecked") Class storageClass = (Class) Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG)); - //Storage storage = StorageFactory.createStorage(storageClass, connectorConfig, conf, url); - Storage storage = StorageFactory.createStorage(storageClass, conf, url); + Storage storage = StorageFactory.createStorage(storageClass, connectorConfig, conf, url); + // Storage storage = StorageFactory.createStorage(storageClass, conf, url); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); partitioner = hdfsWriter.getPartitioner();
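
A note on the CSV-to-Parquet patches above: the new ``CsvParquetFormat`` only works when the Connect worker hands the sink plain strings, and ``getRecordWriter`` throws an ``IllegalArgumentException`` unless a Parquet message type is supplied via ``s3.parquet.schema``. The sketch below shows how those pieces might fit together in a connector properties file. It is illustrative only: ``format.class``, ``s3.parquet.schema`` and the ``StringConverter`` requirement come from the patch itself, while the connector name, topic, the ``s3.url``/``flush.size`` values and the example schema are assumptions made up for this sketch.

    # Hypothetical sink config exercising the CSV-to-Parquet path (not part of the patch series).
    name=csv-parquet-s3-sink
    connector.class=com.qubole.streamx.s3.S3SinkConnector
    tasks.max=1
    topics=csv_events
    # CSV records must arrive as plain strings; the patch's javadoc calls for StringConverter
    # on both keys and values (set here or in the worker's connect-*.properties).
    key.converter=org.apache.kafka.connect.storage.StringConverter
    value.converter=org.apache.kafka.connect.storage.StringConverter
    # New format class added by the patch: splits each record on commas and writes
    # Snappy-compressed Parquet files (extension .snappy.parquet).
    format.class=com.qubole.streamx.format.csv.CsvParquetFormat
    # New required property: the Parquet message type used to map CSV columns to typed fields.
    # The schema string below is only an example.
    s3.parquet.schema=message csv_event { required binary name; required int32 count; }
    # Remaining properties are placeholders for this example.
    s3.url=s3://example-bucket/topics
    flush.size=3

With a schema like this, a produced record such as ``alice,3`` would be written as one Parquet row with a binary ``name`` field and an int32 ``count`` field. One design caveat worth noting: the patch makes ``S3SinkConnector``'s ``config`` field static and exposes it through the static ``getConfigString``, so the record writer only sees the schema of the most recently configured connector instance; running several connectors with different ``s3.parquet.schema`` values in one worker would therefore be fragile under this change.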