[SPARK-25581][SQL] Rename method benchmark as runBenchmarkSuite in BenchmarkBase

## What changes were proposed in this pull request?

Rename method `benchmark` in `BenchmarkBase` to `runBenchmarkSuite`. Also add comments.
Currently the method name `benchmark` is a bit confusing. Also, the name clashes with local instances of `Benchmark`:

f246813afb/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala (L330-L339)

## How was this patch tested?

Unit test.

Closes #22599 from gengliangwang/renameBenchmarkSuite.

Authored-by: Gengliang Wang <gengliang.wang@databricks.com>
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
This commit is contained in:
Gengliang Wang 2018-10-02 10:04:47 -07:00 committed by Dongjoon Hyun
parent 9bf397c0e4
commit 7b4e94f160
No known key found for this signature in database
GPG key ID: EDA00CE834F0FC5C
10 changed files with 16 additions and 11 deletions

View file

@@ -25,7 +25,12 @@ import java.io.{File, FileOutputStream, OutputStream}
abstract class BenchmarkBase {
var output: Option[OutputStream] = None
def benchmark(): Unit
/**
* Main process of the whole benchmark.
* Implementations of this method are supposed to use the wrapper method `runBenchmark`
* for each benchmark scenario.
*/
def runBenchmarkSuite(): Unit
final def runBenchmark(benchmarkName: String)(func: => Any): Unit = {
val separator = "=" * 96
@@ -46,7 +51,7 @@ abstract class BenchmarkBase {
output = Some(new FileOutputStream(file))
}
benchmark()
runBenchmarkSuite()
output.foreach { o =>
if (o != null) {

View file

@@ -32,7 +32,7 @@ import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
*/
object UDTSerializationBenchmark extends BenchmarkBase {
override def benchmark(): Unit = {
override def runBenchmarkSuite(): Unit = {
runBenchmark("VectorUDT de/serialization") {
val iters = 1e2.toInt

View file

@@ -41,7 +41,7 @@ object UnsafeProjectionBenchmark extends BenchmarkBase {
(1 to numRows).map(_ => encoder.toRow(generator().asInstanceOf[Row]).copy()).toArray
}
override def benchmark(): Unit = {
override def runBenchmarkSuite(): Unit = {
runBenchmark("unsafe projection") {
val iters = 1024 * 16
val numRows = 1024 * 16

View file

@@ -44,7 +44,7 @@ import org.apache.spark.unsafe.map.BytesToBytesMap
*/
object AggregateBenchmark extends SqlBasedBenchmark {
override def benchmark(): Unit = {
override def runBenchmarkSuite(): Unit = {
runBenchmark("aggregate without grouping") {
val N = 500L << 22
codegenBenchmark("agg w/o group", N) {

View file

@@ -198,7 +198,7 @@ object FilterPushdownBenchmark extends BenchmarkBase with SQLHelper {
}
}
override def benchmark(): Unit = {
override def runBenchmarkSuite(): Unit = {
runBenchmark("Pushdown for many distinct value case") {
withTempPath { dir =>
withTempTable("orcTable", "parquetTable") {

View file

@@ -36,7 +36,7 @@ object PrimitiveArrayBenchmark extends BenchmarkBase {
.config("spark.sql.autoBroadcastJoinThreshold", 1)
.getOrCreate()
override def benchmark(): Unit = {
override def runBenchmarkSuite(): Unit = {
runBenchmark("Write primitive arrays in dataset") {
writeDatasetArray(4)
}

View file

@@ -119,7 +119,7 @@ object SortBenchmark extends BenchmarkBase {
benchmark.run()
}
override def benchmark(): Unit = {
override def runBenchmarkSuite(): Unit = {
runBenchmark("radix sort") {
sortBenchmark()
}

View file

@@ -233,7 +233,7 @@ object CompressionSchemeBenchmark extends BenchmarkBase with AllCompressionSchem
runDecodeBenchmark("STRING Decode", iters, count, STRING, testData)
}
override def benchmark(): Unit = {
override def runBenchmarkSuite(): Unit = {
runBenchmark("Compression Scheme Benchmark") {
bitEncodingBenchmark(1024)
shortEncodingBenchmark(1024)

View file

@@ -443,7 +443,7 @@ object ColumnarBatchBenchmark extends BenchmarkBase {
benchmark.run
}
override def benchmark(): Unit = {
override def runBenchmarkSuite(): Unit = {
runBenchmark("Int Read/Write") {
intAccess(1024 * 40)
}

View file

@@ -336,7 +336,7 @@ object OrcReadBenchmark extends BenchmarkBase with SQLHelper {
}
}
override def benchmark(): Unit = {
override def runBenchmarkSuite(): Unit = {
runBenchmark("SQL Single Numeric Column Scan") {
Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType).foreach { dataType =>
numericScanBenchmark(1024 * 1024 * 15, dataType)