Skip to content

Commit

Permalink
update comments; scalafmtAll
Browse files Browse the repository at this point in the history
Signed-off-by: Sean Kao <[email protected]>
  • Loading branch information
seankao-az committed Jan 12, 2024
1 parent f46c51a commit 67d3e4c
Show file tree
Hide file tree
Showing 4 changed files with 4 additions and 6 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -96,15 +96,14 @@ object FlintSparkIndex {
}

/**
* Add backticks to table name for special character handling
* Add backticks to table name to escape special characters
*
* @param fullTableName
* source full table name
* @return
* quoted table name
*/
def quotedTableName(fullTableName: String): String = {
// TODO: add UT
require(fullTableName.split('.').length >= 3, s"Table name $fullTableName is not qualified")

val parts = fullTableName.split('.')
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,6 @@ case class FlintSparkSkippingIndex(
new Column(aggFunc.as(name))
}

// todo: find all occurance of spark.read.table
df.getOrElse(spark.read.table(quotedTableName(tableName)))
.groupBy(input_file_name().as(FILE_PATH_COLUMN))
.agg(namedAggFuncs.head, namedAggFuncs.tail: _*)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ class FlintSparkCoveringIndexSuite extends FlintSuite {

val df = spark.createDataFrame(Seq(("hello", 20))).toDF("name", "age")
val indexDf = index.build(spark, Some(df))
indexDf.schema.fieldNames should contain only("name")
indexDf.schema.fieldNames should contain only ("name")
}

test("can build index on table name with special characters") {
Expand All @@ -46,7 +46,7 @@ class FlintSparkCoveringIndexSuite extends FlintSuite {

val df = spark.createDataFrame(Seq(("hello", 20))).toDF("name", "age")
val indexDf = index.build(spark, Some(df))
indexDf.schema.fieldNames should contain only("name")
indexDf.schema.fieldNames should contain only ("name")
}

test("should fail if no indexed column given") {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ class FlintSparkSkippingIndexSuite extends FlintSuite {

val df = spark.createDataFrame(Seq(("hello", 20))).toDF("name", "age")
val indexDf = index.build(spark, Some(df))
indexDf.schema.fieldNames should contain only("name", FILE_PATH_COLUMN, ID_COLUMN)
indexDf.schema.fieldNames should contain only ("name", FILE_PATH_COLUMN, ID_COLUMN)
}

// Test index build for different column type
Expand Down

0 comments on commit 67d3e4c

Please sign in to comment.