diff --git a/flint-spark-integration/src/main/scala/org/opensearch/flint/spark/FlintSparkIndex.scala b/flint-spark-integration/src/main/scala/org/opensearch/flint/spark/FlintSparkIndex.scala index 038a44005..248d105a2 100644 --- a/flint-spark-integration/src/main/scala/org/opensearch/flint/spark/FlintSparkIndex.scala +++ b/flint-spark-integration/src/main/scala/org/opensearch/flint/spark/FlintSparkIndex.scala @@ -96,7 +96,7 @@ object FlintSparkIndex { } /** - * Add backticks to table name for special character handling + * Add backticks to table name to escape special characters * * @param fullTableName * source full table name @@ -104,7 +104,6 @@ object FlintSparkIndex { * quoted table name */ def quotedTableName(fullTableName: String): String = { - // TODO: add UT require(fullTableName.split('.').length >= 3, s"Table name $fullTableName is not qualified") val parts = fullTableName.split('.') diff --git a/flint-spark-integration/src/main/scala/org/opensearch/flint/spark/skipping/FlintSparkSkippingIndex.scala b/flint-spark-integration/src/main/scala/org/opensearch/flint/spark/skipping/FlintSparkSkippingIndex.scala index 8251febfe..2e8a3c82d 100644 --- a/flint-spark-integration/src/main/scala/org/opensearch/flint/spark/skipping/FlintSparkSkippingIndex.scala +++ b/flint-spark-integration/src/main/scala/org/opensearch/flint/spark/skipping/FlintSparkSkippingIndex.scala @@ -77,7 +77,6 @@ case class FlintSparkSkippingIndex( new Column(aggFunc.as(name)) } - // todo: find all occurance of spark.read.table df.getOrElse(spark.read.table(quotedTableName(tableName))) .groupBy(input_file_name().as(FILE_PATH_COLUMN)) .agg(namedAggFuncs.head, namedAggFuncs.tail: _*) diff --git a/flint-spark-integration/src/test/scala/org/opensearch/flint/spark/covering/FlintSparkCoveringIndexSuite.scala b/flint-spark-integration/src/test/scala/org/opensearch/flint/spark/covering/FlintSparkCoveringIndexSuite.scala index 9829fd5fd..1cce47d1a 100644 --- 
a/flint-spark-integration/src/test/scala/org/opensearch/flint/spark/covering/FlintSparkCoveringIndexSuite.scala +++ b/flint-spark-integration/src/test/scala/org/opensearch/flint/spark/covering/FlintSparkCoveringIndexSuite.scala @@ -37,7 +37,7 @@ class FlintSparkCoveringIndexSuite extends FlintSuite { val df = spark.createDataFrame(Seq(("hello", 20))).toDF("name", "age") val indexDf = index.build(spark, Some(df)) - indexDf.schema.fieldNames should contain only("name") + indexDf.schema.fieldNames should contain only ("name") } test("can build index on table name with special characters") { @@ -46,7 +46,7 @@ class FlintSparkCoveringIndexSuite extends FlintSuite { val df = spark.createDataFrame(Seq(("hello", 20))).toDF("name", "age") val indexDf = index.build(spark, Some(df)) - indexDf.schema.fieldNames should contain only("name") + indexDf.schema.fieldNames should contain only ("name") } test("should fail if no indexed column given") { diff --git a/flint-spark-integration/src/test/scala/org/opensearch/flint/spark/skipping/FlintSparkSkippingIndexSuite.scala b/flint-spark-integration/src/test/scala/org/opensearch/flint/spark/skipping/FlintSparkSkippingIndexSuite.scala index 7a55bae0f..247a055bf 100644 --- a/flint-spark-integration/src/test/scala/org/opensearch/flint/spark/skipping/FlintSparkSkippingIndexSuite.scala +++ b/flint-spark-integration/src/test/scala/org/opensearch/flint/spark/skipping/FlintSparkSkippingIndexSuite.scala @@ -77,7 +77,7 @@ class FlintSparkSkippingIndexSuite extends FlintSuite { val df = spark.createDataFrame(Seq(("hello", 20))).toDF("name", "age") val indexDf = index.build(spark, Some(df)) - indexDf.schema.fieldNames should contain only("name", FILE_PATH_COLUMN, ID_COLUMN) + indexDf.schema.fieldNames should contain only ("name", FILE_PATH_COLUMN, ID_COLUMN) } // Test index build for different column type