
Commit

Fix spelling; rename file
jbampton committed Sep 25, 2024
1 parent ec45de9 commit b6ee234
Showing 2 changed files with 8 additions and 4 deletions.
Changed file 1 of 2:
@@ -26,7 +26,7 @@ import org.apache.spark.sql.{DataFrame, Row}
 import scala.collection.mutable
 import scala.collection.mutable.ArrayBuffer
 
-object AggregateWithinPartitons {
+object AggregateWithinPartitions {
 
   /**
    * Run aggregation within each partition without incurring a data shuffle. Currently support
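
The Scaladoc context above states the operator's goal: run the aggregation inside each existing partition so that no data shuffle is incurred. The following is a rough conceptual sketch of that idea only, not the Sedona implementation; the DataFrame df and its Double-typed "weight" column are assumptions made for illustration.

// Conceptual sketch: aggregate within each partition without triggering a shuffle.
// Assumes a DataFrame `df` with a numeric (Double) column named "weight".
import org.apache.spark.sql.DataFrame

def sumWeightPerPartition(df: DataFrame): Array[Double] = {
  // mapPartitions keeps every row in its current partition, so no shuffle occurs;
  // each partition collapses to a single running sum.
  df.select("weight").rdd
    .mapPartitions { rows =>
      var sum = 0.0
      rows.foreach(row => sum += row.getDouble(0))
      Iterator(sum)
    }
    .collect() // one aggregated value per input partition
}
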
Changed file 2 of 2:
@@ -18,7 +18,7 @@
  */
 package org.apache.sedona.viz.sql
 
-import org.apache.sedona.viz.sql.operator.{AggregateWithinPartitons, VizPartitioner}
+import org.apache.sedona.viz.sql.operator.{AggregateWithinPartitions, VizPartitioner}
 import org.apache.sedona.viz.sql.utils.{Conf, LineageDecoder}
 import org.apache.spark.sql.functions.lit
 import org.locationtech.jts.geom.Envelope
@@ -43,7 +43,11 @@ class optVizOperatorTest extends VizTestBase {
 
     // Test aggregation within partitions
-    val result =
-      AggregateWithinPartitons(newDf.withColumn("weight", lit(100.0)), "pixel", "weight", "avg")
+    val result =
+      AggregateWithinPartitions(
+        newDf.withColumn("weight", lit(100.0)),
+        "pixel",
+        "weight",
+        "avg")
     assert(result.rdd.getNumPartitions == secondaryPID)
 
     // Test the colorize operator
@@ -71,7 +75,7 @@ class optVizOperatorTest extends VizTestBase {
     assert(newDf.rdd.getNumPartitions == secondaryPID)
 
     // Test aggregation within partitions
-    val result = AggregateWithinPartitons(newDf, "pixel", "weight", "count")
+    val result = AggregateWithinPartitions(newDf, "pixel", "weight", "count")
     assert(result.rdd.getNumPartitions == secondaryPID)
 
     // Test the colorize operator
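
Taken together, the call sites in this test suggest the renamed operator's shape: it takes a DataFrame, a key column, a value column, and an aggregate name ("avg", "count", ...) and preserves the input's partitioning. A hedged usage sketch along those lines follows; pixelDf and its column names are assumed for illustration and do not come from this commit.

// Hypothetical call mirroring the test above: `pixelDf`, "pixel", and "weight" are assumed names.
val aggregated = AggregateWithinPartitions(pixelDf, "pixel", "weight", "avg")
// The work stays inside each partition, so the partition count should be unchanged.
assert(aggregated.rdd.getNumPartitions == pixelDf.rdd.getNumPartitions)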
