[SPARK-13131] [SQL] Use best and average time in benchmark #11018

Closed. Wants to merge 6 commits; the diff below shows changes from 4 commits.
core/src/main/scala/org/apache/spark/util/Benchmark.scala (21 changes: 14 additions & 7 deletions)
@@ -18,6 +18,7 @@
 package org.apache.spark.util

 import scala.collection.mutable
+import scala.collection.mutable.ArrayBuffer

 import org.apache.commons.lang3.SystemUtils

@@ -62,13 +63,15 @@ private[spark] class Benchmark(
     val firstRate = results.head.avgRate
     // The results are going to be processor specific so it is useful to include that.
     println(Benchmark.getProcessorName())
-    printf("%-30s %16s %16s %14s\n", name + ":", "Avg Time(ms)", "Avg Rate(M/s)", "Relative Rate")
-    println("-------------------------------------------------------------------------------")
+    printf("%-30s %16s %12s %13s %10s\n", name + ":", "median Time(ms)", "Rate(M/s)", "Per Row(ns)",
Contributor commented (on the "median Time(ms)" line): Can you capitalize the M -> "Median Time"

Contributor Author replied: Will update this when merging.
"Relative")
println("-------------------------------------------------------------------------------------")
results.zip(benchmarks).foreach { r =>
printf("%-30s %16s %16s %14s\n",
printf("%-30s %16s %10s %14s %10s\n",
r._2.name,
"%10.2f" format r._1.avgMs,
"%10.2f" format r._1.avgRate,
"%6.2f" format (1000 / r._1.avgRate),
"%6.2f X" format (r._1.avgRate / firstRate))
}
println
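The two new columns follow directly from the measured rate: Per Row(ns) is 1000 divided by the rate in millions of rows per second, and Relative divides each case's rate by the rate of the first case (firstRate). A quick sanity check of that arithmetic, not part of the patch:

  // A rate of 40.88 M rows/s corresponds to 1000 / 40.88 ≈ 24.46 ns per row,
  // matching the "rang/filter/sum codegen=false" row in the sample output below.
  val rateMRowsPerSec = 40.88
  val perRowNs = 1000 / rateMRowsPerSec  // ≈ 24.46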
@@ -99,22 +102,26 @@ private[spark] object Benchmark {
    * the rate of the function.
    */
   def measure(num: Long, iters: Int, outputPerIteration: Boolean)(f: Int => Unit): Result = {
-    var totalTime = 0L
+    val runTimes = ArrayBuffer[Long]()
     for (i <- 0 until iters + 1) {
       val start = System.nanoTime()

       f(i)

       val end = System.nanoTime()
-      if (i != 0) totalTime += end - start
+      val runTime = end - start
+      if (i > 0) {
+        runTimes += runTime
+      }

       if (outputPerIteration) {
         // scalastyle:off
-        println(s"Iteration $i took ${(end - start) / 1000} microseconds")
+        println(s"Iteration $i took ${runTime / 1000} microseconds")
         // scalastyle:on
       }
     }
-    Result(totalTime.toDouble / 1000000 / iters, num * iters / (totalTime.toDouble / 1000))
+    val result = runTimes.sortBy(x => x).apply(iters / 2).toDouble
+    Result(result / 1000000, num / (result / 1000))
   }
 }
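With this change, the warm-up iteration (i = 0) is discarded and the reported time is the median of the remaining runs; note that runTimes.sortBy(x => x).apply(iters / 2) yields the upper median when iters is even, and that the Result fields keep their avg names while now carrying median-based values. A minimal standalone sketch of the same idea (timeMedianNs is illustrative, not part of the patch):

  import scala.collection.mutable.ArrayBuffer

  // Run `f` once to warm up, then `iters` more times, and return the
  // median run time in nanoseconds (upper median for even `iters`).
  def timeMedianNs(iters: Int)(f: => Unit): Double = {
    val runTimes = ArrayBuffer[Long]()
    for (i <- 0 until iters + 1) {
      val start = System.nanoTime()
      f
      val end = System.nanoTime()
      if (i > 0) runTimes += end - start  // drop the warm-up run
    }
    runTimes.sorted.apply(iters / 2).toDouble
  }

  // Example: median time of a small summation, over 5 measured runs.
  var sink = 0L
  val ns = timeMedianNs(5) { sink = (0 until 1000000).map(_.toLong).sum }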

BenchmarkWholeStageCodegen.scala
@@ -33,53 +33,44 @@ import org.apache.spark.util.Benchmark
  */
 class BenchmarkWholeStageCodegen extends SparkFunSuite {
   lazy val conf = new SparkConf().setMaster("local[1]").setAppName("benchmark")
+    .set("spark.sql.shuffle.partitions", "1")
   lazy val sc = SparkContext.getOrCreate(conf)
   lazy val sqlContext = SQLContext.getOrCreate(sc)

-  def testWholeStage(values: Int): Unit = {
-    val benchmark = new Benchmark("rang/filter/aggregate", values)
+  def runBenchmark(name: String, values: Int)(f: => Unit): Unit = {
+    val benchmark = new Benchmark(name, values)

-    benchmark.addCase("Without codegen") { iter =>
-      sqlContext.setConf("spark.sql.codegen.wholeStage", "false")
-      sqlContext.range(values).filter("(id & 1) = 1").count()
+    Seq(false, true).foreach { enabled =>
+      benchmark.addCase(s"$name codegen=$enabled") { iter =>
+        sqlContext.setConf("spark.sql.codegen.wholeStage", enabled.toString)
+        f
+      }
     }

-    benchmark.addCase("With codegen") { iter =>
-      sqlContext.setConf("spark.sql.codegen.wholeStage", "true")
-      sqlContext.range(values).filter("(id & 1) = 1").count()
-    }
+    benchmark.run()
+  }
+
+  def testWholeStage(values: Int): Unit = {
+
+    runBenchmark("rang/filter/sum", values) {
+      sqlContext.range(values).filter("(id & 1) = 1").groupBy().sum().collect()
+    }
     /*
     Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
-    rang/filter/aggregate:          Avg Time(ms)    Avg Rate(M/s)  Relative Rate
-    -------------------------------------------------------------------------------
-    Without codegen                      7775.53            26.97         1.00 X
-    With codegen                          342.15           612.94        22.73 X
+    rang/filter/sum:            median Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+    -------------------------------------------------------------------------------------
+    rang/filter/sum codegen=false      12823.55        40.88         24.46     1.00 X
+    rang/filter/sum codegen=true         831.80       630.30          1.59    15.42 X
     */
-    benchmark.run()
   }
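The new runBenchmark helper takes its body as a by-name parameter (f: => Unit), so the query is not run when runBenchmark is called; it is re-evaluated inside each addCase closure, after the codegen flag has been set for that case. A minimal sketch of by-name semantics (twice is illustrative, not from the patch):

  // A by-name parameter defers evaluation to each use site.
  def twice(f: => Unit): Unit = { f; f }

  var n = 0
  twice { n += 1 }
  assert(n == 2)  // the block ran once per use, not once at the call site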

   def testStatFunctions(values: Int): Unit = {

-    val benchmark = new Benchmark("stat functions", values)
-
-    benchmark.addCase("stddev w/o codegen") { iter =>
-      sqlContext.setConf("spark.sql.codegen.wholeStage", "false")
+    runBenchmark("stddev", values) {
       sqlContext.range(values).groupBy().agg("id" -> "stddev").collect()
     }

-    benchmark.addCase("stddev w codegen") { iter =>
-      sqlContext.setConf("spark.sql.codegen.wholeStage", "true")
-      sqlContext.range(values).groupBy().agg("id" -> "stddev").collect()
-    }
-
-    benchmark.addCase("kurtosis w/o codegen") { iter =>
-      sqlContext.setConf("spark.sql.codegen.wholeStage", "false")
-      sqlContext.range(values).groupBy().agg("id" -> "kurtosis").collect()
-    }
-
-    benchmark.addCase("kurtosis w codegen") { iter =>
-      sqlContext.setConf("spark.sql.codegen.wholeStage", "true")
+    runBenchmark("kurtosis", values) {
       sqlContext.range(values).groupBy().agg("id" -> "kurtosis").collect()
     }

@@ -98,36 +89,32 @@ class BenchmarkWholeStageCodegen extends SparkFunSuite {
     Using DeclarativeAggregate:

     Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
-    stddev:                         Avg Time(ms)    Avg Rate(M/s)  Relative Rate
-    -------------------------------------------------------------------------------
-    stddev w/o codegen                    989.22            21.20         1.00 X
-    stddev w codegen                      352.35            59.52         2.81 X
-    kurtosis w/o codegen                 3636.91             5.77         0.27 X
-    kurtosis w codegen                    369.25            56.79         2.68 X
+    stddev:                     median Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+    -------------------------------------------------------------------------------------
+    stddev codegen=false                1644.14        12.76         78.40     1.00 X
+    stddev codegen=true                  349.35        60.03         16.66     4.71 X
+
+    Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
+    kurtosis:                   median Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+    -------------------------------------------------------------------------------------
+    kurtosis codegen=false              3491.49         6.01        166.49     1.00 X
+    kurtosis codegen=true                561.54        37.35         26.78     6.22 X
     */
-    benchmark.run()
   }

   def testAggregateWithKey(values: Int): Unit = {
-    val benchmark = new Benchmark("Aggregate with keys", values)
-
-    benchmark.addCase("Aggregate w/o codegen") { iter =>
-      sqlContext.setConf("spark.sql.codegen.wholeStage", "false")
-      sqlContext.range(values).selectExpr("(id & 65535) as k").groupBy("k").sum().collect()
-    }
-    benchmark.addCase(s"Aggregate w codegen") { iter =>
-      sqlContext.setConf("spark.sql.codegen.wholeStage", "true")
+    runBenchmark("Aggregate w keys", values) {
       sqlContext.range(values).selectExpr("(id & 65535) as k").groupBy("k").sum().collect()
     }

     /*
     Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
-    Aggregate with keys:            Avg Time(ms)    Avg Rate(M/s)  Relative Rate
-    -------------------------------------------------------------------------------
-    Aggregate w/o codegen                4254.38             4.93         1.00 X
-    Aggregate w codegen                  2661.45             7.88         1.60 X
+    Aggregate w keys:           median Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+    -------------------------------------------------------------------------------------
+    Aggregate w keys codegen=false      2390.44         8.77        113.99     1.00 X
+    Aggregate w keys codegen=true       1669.62        12.56         79.61     1.43 X
     */
-    benchmark.run()
   }
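Cross-checking the Relative column here: it is the ratio of each case's rate to the first case's, 12.56 / 8.77 ≈ 1.43, which agrees with the inverse ratio of the median times, 2390.44 / 1669.62 ≈ 1.43.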

   def testBytesToBytesMap(values: Int): Unit = {
@@ -187,20 +174,20 @@

     /**
     Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
-    Aggregate with keys:            Avg Time(ms)    Avg Rate(M/s)  Relative Rate
-    -------------------------------------------------------------------------------
-    hash                                  662.06            79.19         1.00 X
-    BytesToBytesMap (off Heap)           2209.42            23.73         0.30 X
-    BytesToBytesMap (on Heap)            2957.68            17.73         0.22 X
+    BytesToBytesMap:            median Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+    -------------------------------------------------------------------------------------
+    hash                                 663.70        78.99         12.66     1.00 X
+    BytesToBytesMap (off Heap)          3389.42        15.47         64.65     0.20 X
+    BytesToBytesMap (on Heap)           3476.07        15.08         66.30     0.19 X
     */
     benchmark.run()
   }

   // These benchmark are skipped in normal build
-  ignore("benchmark") {
+  test("benchmark") {
     // testWholeStage(200 << 20)
-    // testStddev(20 << 20)
+    // testStatFunctions(20 << 20)
     // testAggregateWithKey(20 << 20)
-    // testBytesToBytesMap(1024 * 1024 * 50)
+    // testBytesToBytesMap(50 << 20)
   }
 }
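A note on the ignore -> test flip above: in ScalaTest's FunSuite (which SparkFunSuite extends), ignore registers the body but skips it at run time, while test executes it on every run, so this change presumably exists to exercise the benchmarks during review, with the individual calls left commented out. A minimal sketch of the distinction, not tied to this patch:

  import org.scalatest.FunSuite

  class IgnoreVsTestSketch extends FunSuite {
    ignore("skipped in normal builds") {
      // an expensive benchmark would go here; the body compiles but never runs
    }
    test("runs on every build") {
      assert(1 + 1 == 2)
    }
  }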