Skip to content

Commit

Permalink
Fix: Address review comments
Browse files Browse the repository at this point in the history
  • Loading branch information
karuppayya committed Jun 18, 2020
1 parent 85280ca commit 2b3704b
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 6 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -2173,8 +2173,8 @@ object SQLConf {
.checkValue(bit => bit >= 10 && bit <= 30, "The bit value must be in [10, 30].")
.createWithDefault(16)

val SPILL_PARTIAL_AGGREGATE_DISABLED =
buildConf("spark.sql.aggregate.spill.partialaggregate.disabled")
val SKIP_PARTIAL_AGGREGATE_ENABLED =
buildConf("spark.sql.aggregate.partialaggregate.skip.enabled")
.internal()
.doc("Avoid sort/spill to disk during partial aggregation")
.booleanConf
Expand Down Expand Up @@ -2893,7 +2893,7 @@ class SQLConf extends Serializable with Logging {

// Capacity bit for the fast hash aggregate's row map; presumably the config whose
// checkValue restricts it to [10, 30] with default 16 (shown above) — TODO confirm
// this is the same FAST_HASH_AGGREGATE_MAX_ROWS_CAPACITY_BIT entry.
def fastHashAggregateRowMaxCapacityBit: Int = getConf(FAST_HASH_AGGREGATE_MAX_ROWS_CAPACITY_BIT)

// NOTE(review): this commit renames the config key from
// SPILL_PARTIAL_AGGREGATE_DISABLED ("...spill.partialaggregate.disabled") to
// SKIP_PARTIAL_AGGREGATE_ENABLED ("...partialaggregate.skip.enabled"), but the
// accessor keeps its old name. "spill...Disabled" no longer matches the
// "skip...enabled" naming/polarity of the config it reads — consider renaming
// the accessor (e.g. skipPartialAggregateEnabled) in a follow-up; renaming here
// would break existing callers, so only flagging it.
def spillInPartialAggregationDisabled: Boolean = getConf(SPILL_PARTIAL_AGGREGATE_DISABLED)
def spillInPartialAggregationDisabled: Boolean = getConf(SKIP_PARTIAL_AGGREGATE_ENABLED)

// Reads the DATETIME_JAVA8API_ENABLED flag; presumably toggles use of the
// java.time (Java 8) date/time API for datetime values — TODO confirm against
// the config's doc string (not visible in this diff).
def datetimeJava8ApiEnabled: Boolean = getConf(DATETIME_JAVA8API_ENABLED)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,8 @@ case class HashAggregateExec(
"peakMemory" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory"),
"spillSize" -> SQLMetrics.createSizeMetric(sparkContext, "spill size"),
"aggTime" -> SQLMetrics.createTimingMetric(sparkContext, "time in aggregation build"),
"partialAggSkipped" -> SQLMetrics.createMetric(sparkContext, "Num records" +
" skipped partial aggregation skipped"),
"partialAggSkipped" -> SQLMetrics.createMetric(sparkContext,
"number of skipped records for partial aggregates"),
"avgHashProbe" ->
SQLMetrics.createAverageMetric(sparkContext, "avg hash probe bucket list iters"))

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ class WholeStageCodegenSuite extends QueryTest with SharedSparkSession
}

test(s"Avoid spill in partial aggregation" ) {
withSQLConf((SQLConf.SPILL_PARTIAL_AGGREGATE_DISABLED.key, "true")) {
withSQLConf((SQLConf.SKIP_PARTIAL_AGGREGATE_ENABLED.key, "true")) {
// Create Dataframes
val data = Seq(("James", 1), ("James", 1), ("Phil", 1))
val aggDF = data.toDF("name", "values").groupBy("name").sum("values")
Expand Down

0 comments on commit 2b3704b

Please sign in to comment.