
Commit bc320bd
update using scalafmtAll
Signed-off-by: YANGDB <[email protected]>
YANG-DB committed Oct 19, 2023
1 parent bd9a33b commit bc320bd
Showing 3 changed files with 17 additions and 26 deletions.
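This commit is a formatting-only pass, presumably run with the sbt-scalafmt `scalafmtAll` task; no test behavior changes. As a minimal sketch of the dominant rewrite in the hunks below (illustrative only — `spark`, `testIndex`, and `testTable` are hypothetical stand-ins, not code from the commit), scalafmt pulls the opening `s"""` of a multi-line string interpolator onto the same line as the call:

```scala
import org.apache.spark.sql.SparkSession

object ScalafmtStyleExample {
  // Hypothetical stand-ins for the suites' fixtures (not taken from the commit).
  def run(spark: SparkSession, testIndex: String, testTable: String): Unit = {
    // Before scalafmtAll: the call and the opening interpolator sit on separate lines.
    spark.sql(
      s"""
         | CREATE INDEX $testIndex ON $testTable (name)
         |""".stripMargin)

    // After scalafmtAll: the opening s""" is joined onto the call line; the body is unchanged.
    spark.sql(s"""
         | CREATE INDEX $testIndex ON $testTable (name)
         |""".stripMargin)
  }
}
```

The same join accounts for most hunks below (e.g. `matchJson(s"""` and the single-line `useFlintSparkSkippingFileIndex(...)` calls); the rest are whitespace-only adjustments.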
FlintSparkCoveringIndexSqlITSuite.scala
@@ -33,7 +33,7 @@ class FlintSparkCoveringIndexSqlITSuite extends FlintSparkSuite {

createPartitionedTable(testTable)
}

override def afterEach(): Unit = {
super.afterEach()

@@ -94,7 +94,7 @@ class FlintSparkCoveringIndexSqlITSuite extends FlintSparkSuite {
(settings \ "index.number_of_shards").extract[String] shouldBe "2"
(settings \ "index.number_of_replicas").extract[String] shouldBe "3"
}

test("create covering index with invalid option") {
the[IllegalArgumentException] thrownBy
sql(s"""
@@ -235,8 +235,7 @@ class FlintSparkCoveringIndexSqlITSuite extends FlintSparkSuite {
}

test("use existing index as the covering index") {
- sql(
-   s"""
+ sql(s"""
| CREATE INDEX $testIndex ON $testTable USING $targetIndex ( name )
| WITH (
| index_settings = '{"number_of_shards": 2, "number_of_replicas": 3}'
@@ -250,7 +249,7 @@ class FlintSparkCoveringIndexSqlITSuite extends FlintSparkSuite {
var settings = parse(flintClient.getIndexMetadata(targetIndex).indexSettings.get)
(settings \ "index.number_of_shards").extract[String] shouldBe "2"
(settings \ "index.number_of_replicas").extract[String] shouldBe "3"
- //validate the index alias is working
+ // validate the index alias is working
settings = parse(flintClient.getIndexMetadata(testFlintIndex).indexSettings.get)
(settings \ "index.number_of_shards").extract[String] shouldBe "2"
(settings \ "index.number_of_replicas").extract[String] shouldBe "3"
FlintSparkMaterializedViewITSuite.scala
@@ -87,7 +87,7 @@ class FlintSparkMaterializedViewITSuite extends FlintSparkSuite {
| }
|""".stripMargin)
}

// TODO: fix this windowing function unable to be used in GROUP BY
ignore("full refresh materialized view") {
flint
@@ -198,8 +198,7 @@ class FlintSparkMaterializedViewITSuite extends FlintSparkSuite {

val index = flint.describeIndex("existing_index")
index shouldBe defined
- index.get.metadata().getContent() should matchJson(
-   s"""
+ index.get.metadata().getContent() should matchJson(s"""
| {
| "_meta": {
| "version": "${current()}",
FlintSparkSkippingIndexITSuite.scala
@@ -122,8 +122,8 @@ class FlintSparkSkippingIndexITSuite extends FlintSparkSuite {

val index = flint.describeIndex(testIndex)
index shouldBe defined
- val optionJson = compact(render(
-   parse(index.get.metadata().getContent()) \ "_meta" \ "options"))
+ val optionJson =
+   compact(render(parse(index.get.metadata().getContent()) \ "_meta" \ "options"))
optionJson should matchJson("""
| {
| "auto_refresh": "true",
@@ -321,8 +321,7 @@ class FlintSparkSkippingIndexITSuite extends FlintSparkSuite {
|""".stripMargin)

query.queryExecution.executedPlan should
- useFlintSparkSkippingFileIndex(
-   hasIndexFilter(col("year") === 2023))
+ useFlintSparkSkippingFileIndex(hasIndexFilter(col("year") === 2023))
}

test("should not rewrite original query if filtering condition has disjunction") {
@@ -388,8 +387,7 @@ class FlintSparkSkippingIndexITSuite extends FlintSparkSuite {
// Prepare test table
val testTable = "spark_catalog.default.data_type_table"
val testIndex = getSkippingIndexName(testTable)
- sql(
-   s"""
+ sql(s"""
| CREATE TABLE $testTable
| (
| boolean_col BOOLEAN,
@@ -408,8 +406,7 @@ class FlintSparkSkippingIndexITSuite extends FlintSparkSuite {
| )
| USING PARQUET
|""".stripMargin)
- sql(
-   s"""
+ sql(s"""
| INSERT INTO $testTable
| VALUES (
| TRUE,
@@ -449,8 +446,7 @@ class FlintSparkSkippingIndexITSuite extends FlintSparkSuite {

val index = flint.describeIndex(testIndex)
index shouldBe defined
- index.get.metadata().getContent() should matchJson(
-   s"""{
+ index.get.metadata().getContent() should matchJson(s"""{
| "_meta": {
| "name": "flint_spark_catalog_default_data_type_table_skipping_index",
| "version": "${current()}",
@@ -587,17 +583,15 @@ class FlintSparkSkippingIndexITSuite extends FlintSparkSuite {
test("can build skipping index for varchar and char and rewrite applicable query") {
val testTable = "spark_catalog.default.varchar_char_table"
val testIndex = getSkippingIndexName(testTable)
- sql(
-   s"""
+ sql(s"""
| CREATE TABLE $testTable
| (
| varchar_col VARCHAR(20),
| char_col CHAR(20)
| )
| USING PARQUET
|""".stripMargin)
- sql(
-   s"""
+ sql(s"""
| INSERT INTO $testTable
| VALUES (
| "sample varchar",
@@ -613,8 +607,7 @@ class FlintSparkSkippingIndexITSuite extends FlintSparkSuite {
.create()
flint.refreshIndex(testIndex, FULL)

- val query = sql(
-   s"""
+ val query = sql(s"""
| SELECT varchar_col, char_col
| FROM $testTable
| WHERE varchar_col = "sample varchar" AND char_col = "sample char"
@@ -624,8 +617,8 @@ class FlintSparkSkippingIndexITSuite extends FlintSparkSuite {
val paddedChar = "sample char".padTo(20, ' ')
checkAnswer(query, Row("sample varchar", paddedChar))
query.queryExecution.executedPlan should
- useFlintSparkSkippingFileIndex(hasIndexFilter(
-   col("varchar_col") === "sample varchar" && col("char_col") === paddedChar))
+ useFlintSparkSkippingFileIndex(
+   hasIndexFilter(col("varchar_col") === "sample varchar" && col("char_col") === paddedChar))

flint.deleteIndex(testIndex)
}
