From 04894da39b0ecf0486fcb7bdb3f25af2c6813151 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Mon, 16 Jul 2018 17:58:23 +0200 Subject: [PATCH 01/34] [benchmark][Gardening] Literate testing (markdown) The testing of `Benchmark_O` and its public interface needs more documentation. The `lit` tests can be embedded in anything. Lets combine these. --- test/benchmark/{Benchmark_O.test-sh => Benchmark_O.test.md} | 0 test/benchmark/lit.local.cfg | 2 ++ 2 files changed, 2 insertions(+) rename test/benchmark/{Benchmark_O.test-sh => Benchmark_O.test.md} (100%) create mode 100644 test/benchmark/lit.local.cfg diff --git a/test/benchmark/Benchmark_O.test-sh b/test/benchmark/Benchmark_O.test.md similarity index 100% rename from test/benchmark/Benchmark_O.test-sh rename to test/benchmark/Benchmark_O.test.md diff --git a/test/benchmark/lit.local.cfg b/test/benchmark/lit.local.cfg new file mode 100644 index 0000000000000..614deea3f910a --- /dev/null +++ b/test/benchmark/lit.local.cfg @@ -0,0 +1,2 @@ +# suffixes: A list of file extensions to treat as test files. +config.suffixes.add('.md') From a74759bb4979011fb51aec6bc7b7238b2beb1aa7 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Mon, 16 Jul 2018 18:07:43 +0200 Subject: [PATCH 02/34] [benchmark][docs] Re-formatting to markdown Reformatting the `lit` test file to markdown format: * Removed the unnecessary `// `prefix. * Requires preambule in comment * Test documentation intro --- test/benchmark/Benchmark_O.test.md | 117 +++++++++++++++++------------ 1 file changed, 67 insertions(+), 50 deletions(-) diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index 7e991e9240c2d..dfe25c4c35a33 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -1,50 +1,67 @@ -// REQUIRES: OS=macosx -// REQUIRES: asserts -// REQUIRES: benchmark -// REQUIRES: CMAKE_GENERATOR=Ninja - -// RUN: %Benchmark_O --list | %FileCheck %s --check-prefix LISTTAGS -// LISTTAGS: AngryPhonebook,[ -// LISTTAGS-NOT: TestsUtils.BenchmarkCategory. -// LISTTAGS-SAME: String, -// LISTTAGS-SAME: ] - -// RUN: %Benchmark_O AngryPhonebook --num-iters=1 \ -// RUN: | %FileCheck %s --check-prefix NUMITERS1 -// NUMITERS1: AngryPhonebook,1 -// NUMITERS1-NOT: 0,0,0,0,0 - -// Should run benchmark by name, even if its tags match the default skip-tags -// (unstable,skip). 
Ackermann is marked unstable -// RUN: %Benchmark_O Ackermann | %FileCheck %s --check-prefix NAMEDSKIP -// NAMEDSKIP: Ackermann - -// RUN: %Benchmark_O --list --tags=Dictionary,Array \ -// RUN: | %FileCheck %s --check-prefix ANDTAGS -// ANDTAGS: TwoSum -// ANDTAGS-NOT: Array2D -// ANDTAGS-NOT: DictionarySwap - -// RUN: %Benchmark_O --list --tags=algorithm --skip-tags=validation \ -// RUN: | %FileCheck %s --check-prefix TAGSANDSKIPTAGS -// TAGSANDSKIPTAGS: Ackermann -// TAGSANDSKIPTAGS: DictOfArraysToArrayOfDicts -// TAGSANDSKIPTAGS: Fibonacci -// TAGSANDSKIPTAGS: RomanNumbers - -// RUN: %Benchmark_O --list --tags=algorithm \ -// RUN: --skip-tags=validation,Dictionary,String \ -// RUN: | %FileCheck %s --check-prefix ORSKIPTAGS -// ORSKIPTAGS: Ackermann -// ORSKIPTAGS-NOT: DictOfArraysToArrayOfDicts -// ORSKIPTAGS: Fibonacci -// ORSKIPTAGS-NOT: RomanNumbers - -// RUN: %Benchmark_O --list | %FileCheck %s --check-prefix LISTPRECOMMIT -// LISTPRECOMMIT: #,Test,[Tags] -// LISTPRECOMMIT-NOT: Ackermann -// LISTPRECOMMIT: {{[0-9]+}},AngryPhonebook - -// RUN: %Benchmark_O --list --skip-tags= | %FileCheck %s --check-prefix LISTALL -// LISTALL: Ackermann -// LISTALL: AngryPhonebook + +# `Benchmark_O` Tests + +The `Benchmark_O` binary is used directly from command line as well as a +subcomponent invoked from higher-level scripts (eg. [`Benchmark_Driver`][BD]). +These script therefore depend on the supported command line options and the +format of its console output. The following [`lit` tests][Testing] also serve +as a verification of this public API to prevent its accidental breakage. + +[BD]: https://github.com/apple/swift/blob/master/benchmark/scripts/Benchmark_Driver +[Testing]: https://github.com/apple/swift/blob/master/docs/Testing.md + +```` +RUN: %Benchmark_O --list | %FileCheck %s --check-prefix LISTTAGS +LISTTAGS: AngryPhonebook,[ +LISTTAGS-NOT: TestsUtils.BenchmarkCategory. +LISTTAGS-SAME: String, +LISTTAGS-SAME: ] + +RUN: %Benchmark_O AngryPhonebook --num-iters=1 \ +RUN: | %FileCheck %s --check-prefix NUMITERS1 +NUMITERS1: AngryPhonebook,1 +NUMITERS1-NOT: 0,0,0,0,0 +```` + +Should run benchmark by name, even if its tags match the default skip-tags +(unstable,skip). 
Ackermann is marked unstable + +```` +RUN: %Benchmark_O Ackermann | %FileCheck %s --check-prefix NAMEDSKIP +NAMEDSKIP: Ackermann + +RUN: %Benchmark_O --list --tags=Dictionary,Array \ +RUN: | %FileCheck %s --check-prefix ANDTAGS +ANDTAGS: TwoSum +ANDTAGS-NOT: Array2D +ANDTAGS-NOT: DictionarySwap + +RUN: %Benchmark_O --list --tags=algorithm --skip-tags=validation \ +RUN: | %FileCheck %s --check-prefix TAGSANDSKIPTAGS +TAGSANDSKIPTAGS: Ackermann +TAGSANDSKIPTAGS: DictOfArraysToArrayOfDicts +TAGSANDSKIPTAGS: Fibonacci +TAGSANDSKIPTAGS: RomanNumbers + +RUN: %Benchmark_O --list --tags=algorithm \ +RUN: --skip-tags=validation,Dictionary,String \ +RUN: | %FileCheck %s --check-prefix ORSKIPTAGS +ORSKIPTAGS: Ackermann +ORSKIPTAGS-NOT: DictOfArraysToArrayOfDicts +ORSKIPTAGS: Fibonacci +ORSKIPTAGS-NOT: RomanNumbers + +RUN: %Benchmark_O --list | %FileCheck %s --check-prefix LISTPRECOMMIT +LISTPRECOMMIT: #,Test,[Tags] +LISTPRECOMMIT-NOT: Ackermann +LISTPRECOMMIT: {{[0-9]+}},AngryPhonebook + +RUN: %Benchmark_O --list --skip-tags= | %FileCheck %s --check-prefix LISTALL +LISTALL: Ackermann +LISTALL: AngryPhonebook +```` From 43390e8d5bfad99bc234562d8f243c3bf7c284dd Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Mon, 16 Jul 2018 18:39:06 +0200 Subject: [PATCH 03/34] [benchmark] Combined `--list` tests, documentation --- test/benchmark/Benchmark_O.test.md | 39 +++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index dfe25c4c35a33..405f430983e9f 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -15,13 +15,37 @@ as a verification of this public API to prevent its accidental breakage. [BD]: https://github.com/apple/swift/blob/master/benchmark/scripts/Benchmark_Driver [Testing]: https://github.com/apple/swift/blob/master/docs/Testing.md +Note: Following tests use *Ackermann* as an example of a benchmark that is +excluded from the default "pre-commit" list because it is marked `unstable` and +the default skip-tags (`unstable,skip`) will exclude it. It's also +alphabetically the first benchmark in the test suite (used to verify running by +index). If these assumptions change, the test must be adapted. + +## List Format ```` -RUN: %Benchmark_O --list | %FileCheck %s --check-prefix LISTTAGS -LISTTAGS: AngryPhonebook,[ +RUN: %Benchmark_O --list | %FileCheck %s \ +RUN: --check-prefix LISTPRECOMMIT \ +RUN: --check-prefix LISTTAGS +LISTPRECOMMIT: #,Test,[Tags] +LISTPRECOMMIT-NOT: Ackermann +LISTPRECOMMIT: {{[0-9]+}},AngryPhonebook +LISTTAGS-SAME: ,[ LISTTAGS-NOT: TestsUtils.BenchmarkCategory. 
-LISTTAGS-SAME: String, +LISTTAGS-SAME: String, api, validation LISTTAGS-SAME: ] +```` + +Verify Ackermann is listed when skip-tags are explicitly empty and that it is +marked unstable: +```` +RUN: %Benchmark_O --list --skip-tags= | %FileCheck %s --check-prefix LISTALL +LISTALL: Ackermann +LISTALL-SAME: unstable +LISTALL: AngryPhonebook +```` + +```` RUN: %Benchmark_O AngryPhonebook --num-iters=1 \ RUN: | %FileCheck %s --check-prefix NUMITERS1 NUMITERS1: AngryPhonebook,1 @@ -55,13 +79,4 @@ ORSKIPTAGS: Ackermann ORSKIPTAGS-NOT: DictOfArraysToArrayOfDicts ORSKIPTAGS: Fibonacci ORSKIPTAGS-NOT: RomanNumbers - -RUN: %Benchmark_O --list | %FileCheck %s --check-prefix LISTPRECOMMIT -LISTPRECOMMIT: #,Test,[Tags] -LISTPRECOMMIT-NOT: Ackermann -LISTPRECOMMIT: {{[0-9]+}},AngryPhonebook - -RUN: %Benchmark_O --list --skip-tags= | %FileCheck %s --check-prefix LISTALL -LISTALL: Ackermann -LISTALL: AngryPhonebook ```` From e3cbd187a080db5bd2b327d35723bbdca50dbb59 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Mon, 16 Jul 2018 19:27:03 +0200 Subject: [PATCH 04/34] [benchmark] Improved docs and tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added check for running by test number. * Documented “dry run” using `--list`. * Moved real run test to the end. * Added checks for logging benchmarks measurements (header and stats). * Added check that specifying the same test multiple times runs it just once. --- test/benchmark/Benchmark_O.test.md | 43 +++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index 405f430983e9f..6de1fda1c7bfe 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -45,20 +45,25 @@ LISTALL-SAME: unstable LISTALL: AngryPhonebook ```` -```` -RUN: %Benchmark_O AngryPhonebook --num-iters=1 \ -RUN: | %FileCheck %s --check-prefix NUMITERS1 -NUMITERS1: AngryPhonebook,1 -NUMITERS1-NOT: 0,0,0,0,0 -```` +## Benchmark Selection +The logic for filtering tests based on specified names, indices and tags +is shared between the default "run" and `--list` commands. It is tested on +the list command, which is much faster, because it runs no benchmarks. +It provides us with ability to do a "dry run". -Should run benchmark by name, even if its tags match the default skip-tags -(unstable,skip). Ackermann is marked unstable +Run benchmark by name (even if its tags match the skip-tags) or test number: ```` -RUN: %Benchmark_O Ackermann | %FileCheck %s --check-prefix NAMEDSKIP +RUN: %Benchmark_O Ackermann --list | %FileCheck %s --check-prefix NAMEDSKIP NAMEDSKIP: Ackermann +RUN: %Benchmark_O 1 --list | %FileCheck %s --check-prefix RUNBYNUMBER +RUNBYNUMBER: Ackermann +```` + +Composition of `tags` and `skip-tags`: + +```` RUN: %Benchmark_O --list --tags=Dictionary,Array \ RUN: | %FileCheck %s --check-prefix ANDTAGS ANDTAGS: TwoSum @@ -80,3 +85,23 @@ ORSKIPTAGS-NOT: DictOfArraysToArrayOfDicts ORSKIPTAGS: Fibonacci ORSKIPTAGS-NOT: RomanNumbers ```` + +## Running Benchmarks +Each real benchmark execution takes about a second. If possible, multiple checks +are combined into one run to minimize the test time. 
+ +```` +RUN: %Benchmark_O AngryPhonebook --num-iters=1 \ +RUN: | %FileCheck %s --check-prefix NUMITERS1 \ +RUN: --check-prefix LOGHEADER \ +RUN: --check-prefix LOGBENCH +LOGHEADER-LABEL: #,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us) +LOGBENCH: {{[0-9]+}}, +NUMITERS1: AngryPhonebook,1 +NUMITERS1-NOT: 0,0,0,0,0 +LOGBENCH-SAME: ,{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}} + +RUN: %Benchmark_O 1 Ackermann 1 | %FileCheck %s --check-prefix RUNJUSTONCE +RUNJUSTONCE-LABEL: 1,Ackermann +RUNJUSTONCE-NOT: 1,Ackermann +```` From 0efdd8d67f14cc884b061886412c6abb82f9dae9 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 13 Jul 2018 12:12:51 +0200 Subject: [PATCH 05/34] [benchmark][Gardening] Nicer header composition --- benchmark/utils/DriverUtils.swift | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 5675c767ecd9b..99cba9373b04d 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -439,10 +439,14 @@ func printRunInfo(_ c: TestConfig) { } func runBenchmarks(_ c: TestConfig) { - let units = "us" - print("#\(c.delim)TEST\(c.delim)SAMPLES\(c.delim)MIN(\(units))\(c.delim)MAX(\(units))\(c.delim)MEAN(\(units))\(c.delim)SD(\(units))\(c.delim)MEDIAN(\(units))") var sumBenchResults = BenchResults() sumBenchResults.sampleCount = 0 + let withUnit = {$0 + "(us)"} + let header = ( + ["#", "TEST", "SAMPLES"] + + ["MIN", "MAX", "MEAN", "SD", "MEDIAN"].map(withUnit) + ).joined(separator: c.delim) + print(header) for t in c.tests { guard let results = runBench(t, c) else { From c7fc745fffd4785db0c121fff176f6edbce12907 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 13 Jul 2018 12:19:37 +0200 Subject: [PATCH 06/34] [benchmark] Removed bogus Totals stats Report only the total number of executed tests. Aggregating MIN, MAX and MEAN values for all executed benchmarks together (with microsecond precision!) has no statistical relevance. --- benchmark/utils/DriverUtils.swift | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 99cba9373b04d..33c0e0b29dabb 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -439,8 +439,6 @@ func printRunInfo(_ c: TestConfig) { } func runBenchmarks(_ c: TestConfig) { - var sumBenchResults = BenchResults() - sumBenchResults.sampleCount = 0 let withUnit = {$0 + "(us)"} let header = ( ["#", "TEST", "SAMPLES"] + @@ -448,6 +446,8 @@ func runBenchmarks(_ c: TestConfig) { ).joined(separator: c.delim) print(header) + var testCount = 0 + for t in c.tests { guard let results = runBench(t, c) else { print("\(t.index)\(c.delim)\(t.name)\(c.delim)Unsupported") @@ -457,18 +457,11 @@ func runBenchmarks(_ c: TestConfig) { print("\(t.index)\(c.delim)\(t.name)\(c.delim)\(results.description)") fflush(stdout) - sumBenchResults.min += results.min - sumBenchResults.max += results.max - sumBenchResults.mean += results.mean - sumBenchResults.sampleCount += 1 - // Don't accumulate SD and Median, as simple sum isn't valid for them. - // TODO: Compute SD and Median for total results as well. 
- // sumBenchResults.sd += results.sd - // sumBenchResults.median += results.median + testCount += 1 } print("") - print("Totals\(c.delim)\(sumBenchResults.description)") + print("Totals\(c.delim)\(testCount)") } public func main() { From 08364520668a7043e57a165352324b4dbb6ee26f Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Mon, 16 Jul 2018 11:40:43 +0200 Subject: [PATCH 07/34] [benchmark][Gardening] BenchmarkResults formatting Moved the formatting of`BenchmarkResults` into `runBenchmarks` function which already contained the logging of header and the special case of unsupported benchmark. --- benchmark/utils/DriverUtils.swift | 59 ++++++++++--------------------- 1 file changed, 19 insertions(+), 40 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 33c0e0b29dabb..4b06e239c7703 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -19,38 +19,7 @@ import Darwin import TestsUtils struct BenchResults { - var delim: String = "," - var sampleCount: UInt64 = 0 - var min: UInt64 = 0 - var max: UInt64 = 0 - var mean: UInt64 = 0 - var sd: UInt64 = 0 - var median: UInt64 = 0 - - init() {} - - init(delim: String, sampleCount: UInt64, min: UInt64, max: UInt64, mean: UInt64, sd: UInt64, median: UInt64) { - self.delim = delim - self.sampleCount = sampleCount - self.min = min - self.max = max - self.mean = mean - self.sd = sd - self.median = median - - // Sanity the bounds of our results - precondition(self.min <= self.max, "min should always be <= max") - precondition(self.min <= self.mean, "min should always be <= mean") - precondition(self.min <= self.median, "min should always be <= median") - precondition(self.max >= self.mean, "max should always be >= mean") - precondition(self.max >= self.median, "max should always be >= median") - } -} - -extension BenchResults : CustomStringConvertible { - var description: String { - return "\(sampleCount)\(delim)\(min)\(delim)\(max)\(delim)\(mean)\(delim)\(sd)\(delim)\(median)" - } + let sampleCount, min, max, mean, sd, median: UInt64 } struct Test { @@ -413,7 +382,7 @@ func runBench(_ test: Test, _ c: TestConfig) -> BenchResults? { let (mean, sd) = internalMeanSD(samples) // Return our benchmark results. - return BenchResults(delim: c.delim, sampleCount: UInt64(samples.count), + return BenchResults(sampleCount: UInt64(samples.count), min: samples.min()!, max: samples.max()!, mean: mean, sd: sd, median: internalMedian(samples)) } @@ -438,6 +407,7 @@ func printRunInfo(_ c: TestConfig) { } } +/// Execute benchmarks and continuously report the measurement results. func runBenchmarks(_ c: TestConfig) { let withUnit = {$0 + "(us)"} let header = ( @@ -448,16 +418,25 @@ func runBenchmarks(_ c: TestConfig) { var testCount = 0 - for t in c.tests { - guard let results = runBench(t, c) else { - print("\(t.index)\(c.delim)\(t.name)\(c.delim)Unsupported") - fflush(stdout) - continue + func report(_ t: Test, results: BenchResults?) { + func values(r: BenchResults) -> [String] { + return [r.sampleCount, r.min, r.max, r.mean, r.sd, r.median] + .map { String($0) } } - print("\(t.index)\(c.delim)\(t.name)\(c.delim)\(results.description)") + let benchmarkStats = ( + [String(t.index), t.name] + (results.map(values) ?? 
["Unsupported"]) + ).joined(separator: c.delim) + + print(benchmarkStats) fflush(stdout) - testCount += 1 + if (results != nil) { + testCount += 1 + } + } + + for t in c.tests { + report(t, results:runBench(t, c)) } print("") From 8d0d25a576dd308235055e8ad25c430f70ce84c1 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Tue, 17 Jul 2018 07:17:17 +0200 Subject: [PATCH 08/34] [benchmark] Verbose mode checks Test the `--verbose` mode output and the `--num-samples` option. --- test/benchmark/Benchmark_O.test.md | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index 6de1fda1c7bfe..b022c21bfbd21 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -92,16 +92,32 @@ are combined into one run to minimize the test time. ```` RUN: %Benchmark_O AngryPhonebook --num-iters=1 \ -RUN: | %FileCheck %s --check-prefix NUMITERS1 \ -RUN: --check-prefix LOGHEADER \ -RUN: --check-prefix LOGBENCH +RUN: | %FileCheck %s --check-prefix NUMITERS1 \ +RUN: --check-prefix LOGHEADER \ +RUN: --check-prefix LOGBENCH LOGHEADER-LABEL: #,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us) LOGBENCH: {{[0-9]+}}, NUMITERS1: AngryPhonebook,1 NUMITERS1-NOT: 0,0,0,0,0 LOGBENCH-SAME: ,{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}} +```` + +### Verbose Mode -RUN: %Benchmark_O 1 Ackermann 1 | %FileCheck %s --check-prefix RUNJUSTONCE +```` +RUN: %Benchmark_O 1 Ackermann 1 AngryPhonebook --verbose --num-samples=2 \ +RUN: | %FileCheck %s --check-prefix RUNJUSTONCE \ +RUN: --check-prefix CONFIG \ +RUN: --check-prefix LOGVERBOSE +CONFIG: NumSamples: 2 +CONFIG: Tests Filter: ["1", "Ackermann", "1", "AngryPhonebook"] +CONFIG: Tests to run: Ackermann, AngryPhonebook +LOGVERBOSE-LABEL: Running Ackermann for 2 samples. +LOGVERBOSE: Measuring with scale {{[0-9]+}}. +LOGVERBOSE-NEXT: Sample 0,{{[0-9]+}} +LOGVERBOSE-NEXT: Measuring with scale {{[0-9]+}}. +LOGVERBOSE-NEXT: Sample 1,{{[0-9]+}} RUNJUSTONCE-LABEL: 1,Ackermann RUNJUSTONCE-NOT: 1,Ackermann +LOGVERBOSE-LABEL: Running AngryPhonebook for 2 samples. ```` From fce7a4551d0b2e446affc4fe1665f3dfbe89542b Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Tue, 17 Jul 2018 07:17:38 +0200 Subject: [PATCH 09/34] [benchmark][Gardening] BenchmarkInfo replaces Test The `Test`struct was forwarding everything to `BenchmarkInfo` and its only contribution was carrying of the index. Tuple is fine for that. --- benchmark/utils/DriverUtils.swift | 59 +++++++------------------------ 1 file changed, 12 insertions(+), 47 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 4b06e239c7703..6ac07367cc9b1 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -22,40 +22,6 @@ struct BenchResults { let sampleCount, min, max, mean, sd, median: UInt64 } -struct Test { - let benchInfo: BenchmarkInfo - let index: Int - - /// The name of the benchmark. - var name: String { - return benchInfo.name - } - - /// The "main routine" of the benchmark. - var runFunction: ((Int) -> ())? { - return benchInfo.runFunction - } - - /// The benchmark categories that this test belongs to. Used for filtering. - var tags: [BenchmarkCategory] { - return benchInfo.tags.sorted() - } - - /// An optional initialization function for a benchmark that is run before - /// measuring begins. Intended to be used to initialize global data used in - /// a benchmark. - var setUpFunction: (() -> ())? 
{ - return benchInfo.setUpFunction - } - - /// An optional deinitialization function that if non-null is run /after/ a - /// measurement has been taken. - var tearDownFunction: (() -> ())? { - return benchInfo.tearDownFunction - } -} - -// We should migrate to a collection of BenchmarkInfo. public var registeredBenchmarks: [BenchmarkInfo] = [] enum TestAction { @@ -100,7 +66,7 @@ struct TestConfig { var afterRunSleep: Int? /// The list of tests to run. - var tests = [Test]() + var tests = [(index: Int, info: BenchmarkInfo)]() mutating func processArguments() -> TestAction { let validOptions = [ @@ -211,7 +177,7 @@ struct TestConfig { return benchmarkNamesOrIndices.contains(benchmark.name) || benchmarkNamesOrIndices.contains(String(indices[benchmark.name]!)) } - }.map { Test(benchInfo: $0, index: indices[$0.name]!) } + }.map { (index: indices[$0.name]!, info: $0) } } } @@ -315,7 +281,7 @@ class SampleRunner { } /// Invoke the benchmark entry point and return the run time in milliseconds. -func runBench(_ test: Test, _ c: TestConfig) -> BenchResults? { +func runBench(_ test: BenchmarkInfo, _ c: TestConfig) -> BenchResults? { var samples = [UInt64](repeating: 0, count: c.numSamples) // Before we do anything, check that we actually have a function to @@ -398,10 +364,7 @@ func printRunInfo(_ c: TestConfig) { } print("Tests Filter: \(c.filters)") print("Tests to run: ", terminator: "") - for t in c.tests { - print("\(t.name), ", terminator: "") - } - print("") + print(c.tests.map({ $0.1.name }).joined(separator: ", ")) print("") print("--- DATA ---") } @@ -418,13 +381,13 @@ func runBenchmarks(_ c: TestConfig) { var testCount = 0 - func report(_ t: Test, results: BenchResults?) { + func report(_ index: Int, _ t: BenchmarkInfo, results: BenchResults?) { func values(r: BenchResults) -> [String] { return [r.sampleCount, r.min, r.max, r.mean, r.sd, r.median] .map { String($0) } } let benchmarkStats = ( - [String(t.index), t.name] + (results.map(values) ?? ["Unsupported"]) + [String(index), t.name] + (results.map(values) ?? ["Unsupported"]) ).joined(separator: c.delim) print(benchmarkStats) @@ -435,8 +398,8 @@ func runBenchmarks(_ c: TestConfig) { } } - for t in c.tests { - report(t, results:runBench(t, c)) + for (index, test) in c.tests { + report(index, test, results:runBench(test, c)) } print("") @@ -458,8 +421,10 @@ public func main() { case .listTests: config.findTestsToRun() print("#\(config.delim)Test\(config.delim)[Tags]") - for t in config.tests { - print("\(t.index)\(config.delim)\(t.name)\(config.delim)\(t.tags)") + for (index, t) in config.tests { + let testDescription = [String(index), t.name, t.tags.sorted().description] + .joined(separator: config.delim) + print(testDescription) } case .run: config.findTestsToRun() From 0dbbc3dda70516554425a1fe79db7971ef4b3c18 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 13 Jul 2018 20:19:54 +0200 Subject: [PATCH 10/34] [benchmark][Gardening] Indices are String MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The indices (test numbers) are strings on both ends of the IO — in user input as well as when printed to the console. We can spare few conversions and just store them directly as `String`. 
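To illustrate the approach, here is a minimal standalone Swift sketch of the name-to-index mapping (the `benchmarks` array and `filter` value are made up for the example; the driver builds the real map from `registeredBenchmarks` and the command-line filters):

```swift
// Pair sorted benchmark names with 1-based indices, keeping the indices as
// String because that is how they arrive from the command line and how they
// are printed back out.
let benchmarks = ["TwoSum", "Ackermann", "AngryPhonebook"].sorted()
let indices = Dictionary(uniqueKeysWithValues:
  zip(benchmarks, (1...).lazy.map { String($0) }))

// A positional filter like "2" or "AngryPhonebook" now matches without any
// Int conversions on either side.
let filter = "2"
let selected = benchmarks.filter { $0 == filter || indices[$0] == filter }
print(selected)  // ["AngryPhonebook"]
```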
--- benchmark/utils/DriverUtils.swift | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 6ac07367cc9b1..8e81646181685 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -66,7 +66,7 @@ struct TestConfig { var afterRunSleep: Int? /// The list of tests to run. - var tests = [(index: Int, info: BenchmarkInfo)]() + var tests = [(index: String, info: BenchmarkInfo)]() mutating func processArguments() -> TestAction { let validOptions = [ @@ -164,7 +164,8 @@ struct TestConfig { mutating func findTestsToRun() { registeredBenchmarks.sort() let indices = Dictionary(uniqueKeysWithValues: - zip(registeredBenchmarks.map{$0.name}, 1...)) + zip(registeredBenchmarks.map{ $0.name }, + (1...).lazy.map { String($0) } )) let benchmarkNamesOrIndices = Set(filters) // needed so we don't capture an ivar of a mutable inout self. let (_tags, _skipTags) = (tags, skipTags) @@ -175,7 +176,7 @@ struct TestConfig { benchmark.tags.isDisjoint(with: _skipTags) } else { return benchmarkNamesOrIndices.contains(benchmark.name) || - benchmarkNamesOrIndices.contains(String(indices[benchmark.name]!)) + benchmarkNamesOrIndices.contains(indices[benchmark.name]!) } }.map { (index: indices[$0.name]!, info: $0) } } @@ -381,13 +382,13 @@ func runBenchmarks(_ c: TestConfig) { var testCount = 0 - func report(_ index: Int, _ t: BenchmarkInfo, results: BenchResults?) { + func report(_ index: String, _ t: BenchmarkInfo, results: BenchResults?) { func values(r: BenchResults) -> [String] { return [r.sampleCount, r.min, r.max, r.mean, r.sd, r.median] .map { String($0) } } let benchmarkStats = ( - [String(index), t.name] + (results.map(values) ?? ["Unsupported"]) + [index, t.name] + (results.map(values) ?? ["Unsupported"]) ).joined(separator: c.delim) print(benchmarkStats) From c198f442d48634038357a7524a38f328d97b3f23 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Tue, 17 Jul 2018 07:22:19 +0200 Subject: [PATCH 11/34] [benchmark] Measure environment with rusage Measure and report system environment indicators during benchmark execution: * Memory usage with maximum resident set size (MAX_RSS) in bytes Proxy indicators of system load level: * Number of Involuntary Context Switches (ICS) * Number of Voluntary Context Switches (VCS) MAX_RSS delta is always reported in the last column of the log report. The `--verbose` mode additionaly reports full values measured before and after the benchmark execution as well as their delta for MAX_RSS, ICS and VCS. 
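As a rough standalone illustration of the measurement described above (assuming Darwin, where `ru_maxrss` is reported in bytes; the workload in the middle is only a placeholder):

```swift
import Darwin

// Snapshot resource usage before and after the workload, then report deltas.
func usage() -> rusage {
  var u = rusage()
  getrusage(RUSAGE_SELF, &u)
  return u
}

let before = usage()
let data = [UInt8](repeating: 42, count: 8 << 20)  // placeholder workload
print(data.count)
let after = usage()

print("MAX_RSS delta (B):", after.ru_maxrss - before.ru_maxrss)
print("ICS delta:", after.ru_nivcsw - before.ru_nivcsw)  // involuntary context switches
print("VCS delta:", after.ru_nvcsw - before.ru_nvcsw)    // voluntary context switches
```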
--- benchmark/utils/DriverUtils.swift | 41 +++++++++++++++++++++++++++--- test/benchmark/Benchmark_O.test.md | 10 +++++--- 2 files changed, 44 insertions(+), 7 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 8e81646181685..0f07287452044 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -19,7 +19,7 @@ import Darwin import TestsUtils struct BenchResults { - let sampleCount, min, max, mean, sd, median: UInt64 + let sampleCount, min, max, mean, sd, median, maxRSS: UInt64 } public var registeredBenchmarks: [BenchmarkInfo] = [] @@ -263,6 +263,37 @@ class Timer { class SampleRunner { let timer = Timer() + let baseline = SampleRunner.usage() + let c: TestConfig + + init(_ config: TestConfig) { + self.c = config + } + + private static func usage() -> rusage { + var u = rusage(); getrusage(RUSAGE_SELF, &u); return u + } + + /// Returns maximum resident set size (MAX_RSS) delta in bytes + func measureMemoryUsage() -> Int { + var current = SampleRunner.usage() + let maxRSS = current.ru_maxrss - baseline.ru_maxrss + + if c.verbose { + let pages = maxRSS / sysconf(_SC_PAGESIZE) + func deltaEquation(_ stat: KeyPath) -> String { + let b = baseline[keyPath: stat], c = current[keyPath: stat] + return "\(c) - \(b) = \(c - b)" + } + print(""" + MAX_RSS \(deltaEquation(\rusage.ru_maxrss)) (\(pages) pages) + ICS \(deltaEquation(\rusage.ru_nivcsw)) + VCS \(deltaEquation(\rusage.ru_nvcsw)) + """) + } + return maxRSS + } + func run(_ name: String, fn: (Int) -> Void, num_iters: UInt) -> UInt64 { // Start the timer. #if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER @@ -299,7 +330,7 @@ func runBench(_ test: BenchmarkInfo, _ c: TestConfig) -> BenchResults? { print("Running \(test.name) for \(c.numSamples) samples.") } - let sampler = SampleRunner() + let sampler = SampleRunner(c) for s in 0.. BenchResults? { // Return our benchmark results. return BenchResults(sampleCount: UInt64(samples.count), min: samples.min()!, max: samples.max()!, - mean: mean, sd: sd, median: internalMedian(samples)) + mean: mean, sd: sd, median: internalMedian(samples), + maxRSS: UInt64(sampler.measureMemoryUsage())) } func printRunInfo(_ c: TestConfig) { @@ -377,6 +409,7 @@ func runBenchmarks(_ c: TestConfig) { let header = ( ["#", "TEST", "SAMPLES"] + ["MIN", "MAX", "MEAN", "SD", "MEDIAN"].map(withUnit) + + ["MAX_RSS(B)"] ).joined(separator: c.delim) print(header) @@ -384,7 +417,7 @@ func runBenchmarks(_ c: TestConfig) { func report(_ index: String, _ t: BenchmarkInfo, results: BenchResults?) 
{ func values(r: BenchResults) -> [String] { - return [r.sampleCount, r.min, r.max, r.mean, r.sd, r.median] + return [r.sampleCount, r.min, r.max, r.mean, r.sd, r.median, r.maxRSS] .map { String($0) } } let benchmarkStats = ( diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index b022c21bfbd21..18db69f424059 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -95,11 +95,11 @@ RUN: %Benchmark_O AngryPhonebook --num-iters=1 \ RUN: | %FileCheck %s --check-prefix NUMITERS1 \ RUN: --check-prefix LOGHEADER \ RUN: --check-prefix LOGBENCH -LOGHEADER-LABEL: #,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us) +LOGHEADER-LABEL: #,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us),MAX_RSS(B) LOGBENCH: {{[0-9]+}}, NUMITERS1: AngryPhonebook,1 NUMITERS1-NOT: 0,0,0,0,0 -LOGBENCH-SAME: ,{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}} +LOGBENCH-SAME: ,{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}} ```` ### Verbose Mode @@ -108,7 +108,8 @@ LOGBENCH-SAME: ,{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}} RUN: %Benchmark_O 1 Ackermann 1 AngryPhonebook --verbose --num-samples=2 \ RUN: | %FileCheck %s --check-prefix RUNJUSTONCE \ RUN: --check-prefix CONFIG \ -RUN: --check-prefix LOGVERBOSE +RUN: --check-prefix LOGVERBOSE \ +RUN: --check-prefix MEASUREENV CONFIG: NumSamples: 2 CONFIG: Tests Filter: ["1", "Ackermann", "1", "AngryPhonebook"] CONFIG: Tests to run: Ackermann, AngryPhonebook @@ -117,6 +118,9 @@ LOGVERBOSE: Measuring with scale {{[0-9]+}}. LOGVERBOSE-NEXT: Sample 0,{{[0-9]+}} LOGVERBOSE-NEXT: Measuring with scale {{[0-9]+}}. LOGVERBOSE-NEXT: Sample 1,{{[0-9]+}} +MEASUREENV: MAX_RSS {{[0-9]+}} - {{[0-9]+}} = {{[0-9]+}} ({{[0-9]+}} pages) +MEASUREENV: ICS {{[0-9]+}} - {{[0-9]+}} = {{[0-9]+}} +MEASUREENV: VCS {{[0-9]+}} - {{[0-9]+}} = {{[0-9]+}} RUNJUSTONCE-LABEL: 1,Ackermann RUNJUSTONCE-NOT: 1,Ackermann LOGVERBOSE-LABEL: Running AngryPhonebook for 2 samples. From 377ee464d26933127ba915e4ba8a84062aee945b Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Wed, 18 Jul 2018 14:54:01 +0200 Subject: [PATCH 12/34] [benchmark] Test error handling parsing arguments * Fix: flushing stdout before crashing to enable testing. * Added tests that verify reporting of errors when the parsing of command line arguments fails. --- benchmark/utils/DriverUtils.swift | 1 + test/benchmark/Benchmark_O.test.md | 54 ++++++++++++++++++++++++++++-- 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 0f07287452044..e6eeee0ef3552 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -451,6 +451,7 @@ public func main() { } case let .fail(msg): // We do this since we need an autoclosure... + fflush(stdout) fatalError("\(msg)") case .listTests: config.findTestsToRun() diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index 18db69f424059..bdb2f7315342d 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -87,8 +87,8 @@ ORSKIPTAGS-NOT: RomanNumbers ```` ## Running Benchmarks -Each real benchmark execution takes about a second. If possible, multiple checks -are combined into one run to minimize the test time. +Each real benchmark execution takes about a second per sample. If possible, +multiple checks are combined into one run to minimize the test time. 
```` RUN: %Benchmark_O AngryPhonebook --num-iters=1 \ @@ -125,3 +125,53 @@ RUNJUSTONCE-LABEL: 1,Ackermann RUNJUSTONCE-NOT: 1,Ackermann LOGVERBOSE-LABEL: Running AngryPhonebook for 2 samples. ```` + +## Error Handling + +```` +RUN: not --crash %Benchmark_O --bogus 2>&1 \ +RUN: | %FileCheck %s --check-prefix ARGPARSE \ +RUN: --check-prefix CRASH +ARGPARSE: Invalid option: --bogus +CRASH: Fatal error: +ARGPARSE: Failed to parse arguments +CRASH: Illegal instruction: 4 + +RUN: not --crash %Benchmark_O --iter-scale \ +RUN: 2>&1 | %FileCheck %s --check-prefix NOVALUE +NOVALUE: --iter-scale requires a value + +RUN: not --crash %Benchmark_O --iter-scale= \ +RUN: 2>&1 | %FileCheck %s --check-prefix EMPTYVAL +EMPTYVAL: --iter-scale requires a value + +RUN: not --crash %Benchmark_O --iter-scale=NaN \ +RUN: 2>&1 | %FileCheck %s --check-prefix NANVALUE +NANVALUE: Illegal instruction + +RUN: not --crash %Benchmark_O --num-iters \ +RUN: 2>&1 | %FileCheck %s --check-prefix NUMITERS +NUMITERS: --num-iters requires a value + +RUN: not --crash %Benchmark_O --num-samples \ +RUN: 2>&1 | %FileCheck %s --check-prefix NUMSAMPLES +NUMSAMPLES: --num-samples requires a value + +RUN: not --crash %Benchmark_O --sleep \ +RUN: 2>&1 | %FileCheck %s --check-prefix SLEEP +SLEEP: --sleep requires a +SLEEP-SAME: value + +RUN: not --crash %Benchmark_O --delim \ +RUN: 2>&1 | %FileCheck %s --check-prefix DELIM +DELIM: --delim requires a value + +RUN: not --crash %Benchmark_O --tags=bogus \ +RUN: 2>&1 | %FileCheck %s --check-prefix BADTAG +BADTAG: Unknown benchmark category: 'bogus' + +RUN: not --crash %Benchmark_O --skip-tags=bogus \ +RUN: 2>&1 | %FileCheck %s --check-prefix BADSKIPTAG +BADSKIPTAG: Unknown benchmark category: 'bogus' + +```` From dd228d71819c89160a4a6ad92b2a4b3daf8b3a0c Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 13 Jul 2018 17:11:05 +0200 Subject: [PATCH 13/34] [benchmark][Gardening] guard benchArgs --- benchmark/utils/DriverUtils.swift | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index e6eeee0ef3552..41e2a0caced3d 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -74,11 +74,9 @@ struct TestConfig { "--verbose", "--delim", "--list", "--sleep", "--tags", "--skip-tags", "--help" ] - let maybeBenchArgs: Arguments? = parseArgs(validOptions) - if maybeBenchArgs == nil { + guard let benchArgs = parseArgs(validOptions) else { return .fail("Failed to parse arguments") } - let benchArgs = maybeBenchArgs! filters = benchArgs.positionalArgs From 371f155258431a2c00a0d256e701dab357356096 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Wed, 18 Jul 2018 20:53:58 +0200 Subject: [PATCH 14/34] [benchmark] Exit gracefully on argument errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactored to use Swift’s idiomatic error handling. In case of invalid argument errors, the message is printed to `stderr` and we exit gracefully with error code 1. We no longer crash the app in most cases. 
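The overall pattern can be summarized in a small self-contained sketch; the `parseSleep` helper and its wording are illustrative only, not the driver's actual parser:

```swift
import Darwin

// A typed error whose description doubles as the user-facing message.
enum ArgumentError: Error, CustomStringConvertible {
  case missingValue(String)
  case general(String)

  var description: String {
    switch self {
    case .missingValue(let key): return "\(key) requires a value"
    case .general(let message): return message
    }
  }
}

// Illustrative parser for a single option value.
func parseSleep(_ value: String) throws -> Int {
  guard !value.isEmpty else { throw ArgumentError.missingValue("--sleep") }
  guard let seconds = Int(value) else {
    throw ArgumentError.general("'\(value)' is not a valid value for '--sleep'")
  }
  return seconds
}

// At the top level: report the problem on stderr and exit with code 1
// instead of crashing via fatalError.
do {
  let seconds = try parseSleep(CommandLine.arguments.dropFirst().first ?? "")
  print("Sleeping for \(seconds)s after the run")
} catch {
  fflush(stdout)
  fputs("\(error)\n", stderr)
  exit(1)
}
```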
--- benchmark/utils/ArgParse.swift | 16 ++++++++ benchmark/utils/DriverUtils.swift | 65 +++++++++++++----------------- test/benchmark/Benchmark_O.test.md | 23 +++++------ 3 files changed, 55 insertions(+), 49 deletions(-) diff --git a/benchmark/utils/ArgParse.swift b/benchmark/utils/ArgParse.swift index 96b290d015787..d91e338cef714 100644 --- a/benchmark/utils/ArgParse.swift +++ b/benchmark/utils/ArgParse.swift @@ -24,6 +24,22 @@ public struct Arguments { } } +enum ArgumentError: Error { + case missingValue(String) + case general(String) +} + +extension ArgumentError: CustomStringConvertible { + public var description: String { + switch self { + case let .missingValue(key): + return "\(key) requires a value" //"Missing value for `\(key)`" + case let .general(description): + return "\(description)" + } + } +} + /// Using CommandLine.arguments, returns an Arguments struct describing /// the arguments to this program. If we fail to parse arguments, we /// return nil. diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 41e2a0caced3d..021d474040c1a 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -27,7 +27,6 @@ public var registeredBenchmarks: [BenchmarkInfo] = [] enum TestAction { case run case listTests - case fail(String) case help([String]) } @@ -68,14 +67,14 @@ struct TestConfig { /// The list of tests to run. var tests = [(index: String, info: BenchmarkInfo)]() - mutating func processArguments() -> TestAction { + mutating func processArguments() throws -> TestAction { let validOptions = [ "--iter-scale", "--num-samples", "--num-iters", "--verbose", "--delim", "--list", "--sleep", "--tags", "--skip-tags", "--help" ] guard let benchArgs = parseArgs(validOptions) else { - return .fail("Failed to parse arguments") + throw ArgumentError.general("Failed to parse arguments") } filters = benchArgs.positionalArgs @@ -85,17 +84,17 @@ struct TestConfig { } if let x = benchArgs.optionalArgsMap["--iter-scale"] { - if x.isEmpty { return .fail("--iter-scale requires a value") } + if x.isEmpty { throw ArgumentError.missingValue("--iter-scale") } iterationScale = Int(x)! } if let x = benchArgs.optionalArgsMap["--num-iters"] { - if x.isEmpty { return .fail("--num-iters requires a value") } + if x.isEmpty { throw ArgumentError.missingValue("--num-iters") } fixedNumIters = numericCast(Int(x)!) } if let x = benchArgs.optionalArgsMap["--num-samples"] { - if x.isEmpty { return .fail("--num-samples requires a value") } + if x.isEmpty { throw ArgumentError.missingValue("--num-samples") } numSamples = Int(x)! } @@ -105,25 +104,23 @@ struct TestConfig { } if let x = benchArgs.optionalArgsMap["--delim"] { - if x.isEmpty { return .fail("--delim requires a value") } + if x.isEmpty { throw ArgumentError.missingValue("--delim") } delim = x } - if let x = benchArgs.optionalArgsMap["--tags"] { - if x.isEmpty { return .fail("--tags requires a value") } + func parseCategory(tag: String) throws -> BenchmarkCategory { + guard let category = BenchmarkCategory(rawValue: tag) else { + throw ArgumentError.general("Unknown benchmark category: '\(tag)'") + } + return category + } + if let x = benchArgs.optionalArgsMap["--tags"] { + if x.isEmpty { throw ArgumentError.missingValue("--tags") } // We support specifying multiple tags by splitting on comma, i.e.: - // // --tags=Array,Dictionary - // - // FIXME: If we used Error instead of .fail, then we could have a cleaner - // impl here using map on x and tags.formUnion. 
- for t in x.split(separator: ",") { - guard let cat = BenchmarkCategory(rawValue: String(t)) else { - return .fail("Unknown benchmark category: '\(t)'") - } - tags.insert(cat) - } + tags.formUnion( + try x.split(separator: ",").map(String.init).map(parseCategory)) } if let x = benchArgs.optionalArgsMap["--skip-tags"] { @@ -132,22 +129,14 @@ struct TestConfig { skipTags = [] // We support specifying multiple tags by splitting on comma, i.e.: - // // --skip-tags=Array,Set,unstable,skip - // - // FIXME: If we used Error instead of .fail, then we could have a cleaner - // impl here using map on x and tags.formUnion. - for t in x.split(separator: ",") { - guard let cat = BenchmarkCategory(rawValue: String(t)) else { - return .fail("Unknown benchmark category: '\(t)'") - } - skipTags.insert(cat) - } + skipTags.formUnion( + try x.split(separator: ",").map(String.init).map(parseCategory)) } if let x = benchArgs.optionalArgsMap["--sleep"] { guard let v = Int(x) else { - return .fail("--sleep requires a non-empty integer value") + throw ArgumentError.missingValue("--sleep") } afterRunSleep = v } @@ -440,17 +429,13 @@ func runBenchmarks(_ c: TestConfig) { public func main() { var config = TestConfig() - - switch (config.processArguments()) { + do { + switch (try config.processArguments()) { case let .help(validOptions): print("Valid options:") for v in validOptions { print(" \(v)") } - case let .fail(msg): - // We do this since we need an autoclosure... - fflush(stdout) - fatalError("\(msg)") case .listTests: config.findTestsToRun() print("#\(config.delim)Test\(config.delim)[Tags]") @@ -466,5 +451,13 @@ public func main() { if let x = config.afterRunSleep { sleep(UInt32(x)) } + } + } catch let error as ArgumentError { + fflush(stdout) + fputs(error.description, stderr) + fflush(stderr) + exit(1) + } catch { + fatalError("\(error)") } } diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index bdb2f7315342d..f3926f7e07481 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -129,19 +129,16 @@ LOGVERBOSE-LABEL: Running AngryPhonebook for 2 samples. 
## Error Handling ```` -RUN: not --crash %Benchmark_O --bogus 2>&1 \ -RUN: | %FileCheck %s --check-prefix ARGPARSE \ -RUN: --check-prefix CRASH +RUN: not %Benchmark_O --bogus 2>&1 \ +RUN: | %FileCheck %s --check-prefix ARGPARSE ARGPARSE: Invalid option: --bogus -CRASH: Fatal error: ARGPARSE: Failed to parse arguments -CRASH: Illegal instruction: 4 -RUN: not --crash %Benchmark_O --iter-scale \ +RUN: not %Benchmark_O --iter-scale \ RUN: 2>&1 | %FileCheck %s --check-prefix NOVALUE NOVALUE: --iter-scale requires a value -RUN: not --crash %Benchmark_O --iter-scale= \ +RUN: not %Benchmark_O --iter-scale= \ RUN: 2>&1 | %FileCheck %s --check-prefix EMPTYVAL EMPTYVAL: --iter-scale requires a value @@ -149,28 +146,28 @@ RUN: not --crash %Benchmark_O --iter-scale=NaN \ RUN: 2>&1 | %FileCheck %s --check-prefix NANVALUE NANVALUE: Illegal instruction -RUN: not --crash %Benchmark_O --num-iters \ +RUN: not %Benchmark_O --num-iters \ RUN: 2>&1 | %FileCheck %s --check-prefix NUMITERS NUMITERS: --num-iters requires a value -RUN: not --crash %Benchmark_O --num-samples \ +RUN: not %Benchmark_O --num-samples \ RUN: 2>&1 | %FileCheck %s --check-prefix NUMSAMPLES NUMSAMPLES: --num-samples requires a value -RUN: not --crash %Benchmark_O --sleep \ +RUN: not %Benchmark_O --sleep \ RUN: 2>&1 | %FileCheck %s --check-prefix SLEEP SLEEP: --sleep requires a SLEEP-SAME: value -RUN: not --crash %Benchmark_O --delim \ +RUN: not %Benchmark_O --delim \ RUN: 2>&1 | %FileCheck %s --check-prefix DELIM DELIM: --delim requires a value -RUN: not --crash %Benchmark_O --tags=bogus \ +RUN: not %Benchmark_O --tags=bogus \ RUN: 2>&1 | %FileCheck %s --check-prefix BADTAG BADTAG: Unknown benchmark category: 'bogus' -RUN: not --crash %Benchmark_O --skip-tags=bogus \ +RUN: not %Benchmark_O --skip-tags=bogus \ RUN: 2>&1 | %FileCheck %s --check-prefix BADSKIPTAG BADSKIPTAG: Unknown benchmark category: 'bogus' From 647376c55feea67c66c6d84720644206089ce797 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Thu, 19 Jul 2018 10:30:49 +0200 Subject: [PATCH 15/34] [benchmark][Gardening] Extract optionalArg func --- benchmark/utils/DriverUtils.swift | 37 ++++++++++++------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 021d474040c1a..15a52b70d8db1 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -83,30 +83,22 @@ struct TestConfig { return .help(validOptions) } - if let x = benchArgs.optionalArgsMap["--iter-scale"] { - if x.isEmpty { throw ArgumentError.missingValue("--iter-scale") } - iterationScale = Int(x)! - } - - if let x = benchArgs.optionalArgsMap["--num-iters"] { - if x.isEmpty { throw ArgumentError.missingValue("--num-iters") } - fixedNumIters = numericCast(Int(x)!) - } - - if let x = benchArgs.optionalArgsMap["--num-samples"] { - if x.isEmpty { throw ArgumentError.missingValue("--num-samples") } - numSamples = Int(x)! + func optionalArg(_ name: String, _ action: (String) throws -> Void) throws { + if let value = benchArgs.optionalArgsMap[name] { + guard !value.isEmpty else { throw ArgumentError.missingValue(name) } + try action(value) + } } + try optionalArg("--iter-scale") { iterationScale = Int($0)! } + try optionalArg("--num-iters") { fixedNumIters = UInt($0)! } + try optionalArg("--num-samples") { numSamples = Int($0)! 
} if let _ = benchArgs.optionalArgsMap["--verbose"] { verbose = true print("Verbose") } - if let x = benchArgs.optionalArgsMap["--delim"] { - if x.isEmpty { throw ArgumentError.missingValue("--delim") } - delim = x - } + try optionalArg("--delim") { delim = $0 } func parseCategory(tag: String) throws -> BenchmarkCategory { guard let category = BenchmarkCategory(rawValue: tag) else { @@ -115,22 +107,21 @@ struct TestConfig { return category } - if let x = benchArgs.optionalArgsMap["--tags"] { - if x.isEmpty { throw ArgumentError.missingValue("--tags") } + try optionalArg("--tags") { // We support specifying multiple tags by splitting on comma, i.e.: // --tags=Array,Dictionary - tags.formUnion( - try x.split(separator: ",").map(String.init).map(parseCategory)) + tags = Set( + try $0.split(separator: ",").map(String.init).map(parseCategory)) } if let x = benchArgs.optionalArgsMap["--skip-tags"] { // if the --skip-tags parameter is specified, we need to ignore the // default and start from a clean slate. - skipTags = [] + // If the parameter's value is empty, $0.split maps into [] // We support specifying multiple tags by splitting on comma, i.e.: // --skip-tags=Array,Set,unstable,skip - skipTags.formUnion( + skipTags = Set( try x.split(separator: ",").map(String.init).map(parseCategory)) } From 7d19a03dcec5aff17b18a34cb1aa9ab40e534aad Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Thu, 19 Jul 2018 12:06:19 +0200 Subject: [PATCH 16/34] [benchmark] Gracefully type-check attribute values We no longer crash when the argument value parsing fails, but report an error. --- benchmark/utils/ArgParse.swift | 5 +++++ benchmark/utils/DriverUtils.swift | 35 ++++++++++++++++-------------- test/benchmark/Benchmark_O.test.md | 11 +++++----- 3 files changed, 29 insertions(+), 22 deletions(-) diff --git a/benchmark/utils/ArgParse.swift b/benchmark/utils/ArgParse.swift index d91e338cef714..01240bbf20d2c 100644 --- a/benchmark/utils/ArgParse.swift +++ b/benchmark/utils/ArgParse.swift @@ -26,6 +26,7 @@ public struct Arguments { enum ArgumentError: Error { case missingValue(String) + case invalidType(value: String, type: String, argument: String?) case general(String) } @@ -34,6 +35,10 @@ extension ArgumentError: CustomStringConvertible { switch self { case let .missingValue(key): return "\(key) requires a value" //"Missing value for `\(key)`" + case let .invalidType(value, type, argument): + return (argument == nil) + ? "'\(value)' is not a valid '\(type)'" + : "'\(value)' is not a valid '\(type)' for '\(argument!)'" case let .general(description): return "\(description)" } diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 15a52b70d8db1..9ca8748aff584 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -83,35 +83,43 @@ struct TestConfig { return .help(validOptions) } - func optionalArg(_ name: String, _ action: (String) throws -> Void) throws { + func optionalArg( + _ name: String, + _ property: WritableKeyPath, + parser parse: (String) throws -> T?) + throws { if let value = benchArgs.optionalArgsMap[name] { guard !value.isEmpty else { throw ArgumentError.missingValue(name) } - try action(value) + guard let typedValue = try parse(value) else { + throw ArgumentError.invalidType( + value: value, type: String(describing: T.self), argument: name) + } + self[keyPath: property] = typedValue } } - try optionalArg("--iter-scale") { iterationScale = Int($0)! } - try optionalArg("--num-iters") { fixedNumIters = UInt($0)! 
} - try optionalArg("--num-samples") { numSamples = Int($0)! } + try optionalArg("--iter-scale", \.iterationScale) { Int($0) } + try optionalArg("--num-iters", \.fixedNumIters) { UInt($0) } + try optionalArg("--num-samples", \.numSamples) { Int($0) } if let _ = benchArgs.optionalArgsMap["--verbose"] { verbose = true print("Verbose") } - try optionalArg("--delim") { delim = $0 } + try optionalArg("--delim", \.delim) { $0 } func parseCategory(tag: String) throws -> BenchmarkCategory { guard let category = BenchmarkCategory(rawValue: tag) else { - throw ArgumentError.general("Unknown benchmark category: '\(tag)'") + throw ArgumentError.invalidType( + value: tag, type: "BenchmarkCategory", argument: nil) } return category } - try optionalArg("--tags") { + try optionalArg("--tags", \.tags) { // We support specifying multiple tags by splitting on comma, i.e.: // --tags=Array,Dictionary - tags = Set( - try $0.split(separator: ",").map(String.init).map(parseCategory)) + Set(try $0.split(separator: ",").map(String.init).map(parseCategory)) } if let x = benchArgs.optionalArgsMap["--skip-tags"] { @@ -125,12 +133,7 @@ struct TestConfig { try x.split(separator: ",").map(String.init).map(parseCategory)) } - if let x = benchArgs.optionalArgsMap["--sleep"] { - guard let v = Int(x) else { - throw ArgumentError.missingValue("--sleep") - } - afterRunSleep = v - } + try optionalArg("--sleep", \.afterRunSleep) { Int($0) } if let _ = benchArgs.optionalArgsMap["--list"] { return .listTests diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index f3926f7e07481..4927f09a104fa 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -142,9 +142,9 @@ RUN: not %Benchmark_O --iter-scale= \ RUN: 2>&1 | %FileCheck %s --check-prefix EMPTYVAL EMPTYVAL: --iter-scale requires a value -RUN: not --crash %Benchmark_O --iter-scale=NaN \ +RUN: not %Benchmark_O --iter-scale=NaN \ RUN: 2>&1 | %FileCheck %s --check-prefix NANVALUE -NANVALUE: Illegal instruction +NANVALUE: 'NaN' is not a valid 'Int' for '--iter-scale' RUN: not %Benchmark_O --num-iters \ RUN: 2>&1 | %FileCheck %s --check-prefix NUMITERS @@ -156,8 +156,7 @@ NUMSAMPLES: --num-samples requires a value RUN: not %Benchmark_O --sleep \ RUN: 2>&1 | %FileCheck %s --check-prefix SLEEP -SLEEP: --sleep requires a -SLEEP-SAME: value +SLEEP: --sleep requires a value RUN: not %Benchmark_O --delim \ RUN: 2>&1 | %FileCheck %s --check-prefix DELIM @@ -165,10 +164,10 @@ DELIM: --delim requires a value RUN: not %Benchmark_O --tags=bogus \ RUN: 2>&1 | %FileCheck %s --check-prefix BADTAG -BADTAG: Unknown benchmark category: 'bogus' +BADTAG: 'bogus' is not a valid 'BenchmarkCategory' RUN: not %Benchmark_O --skip-tags=bogus \ RUN: 2>&1 | %FileCheck %s --check-prefix BADSKIPTAG -BADSKIPTAG: Unknown benchmark category: 'bogus' +BADSKIPTAG: 'bogus' is not a valid 'BenchmarkCategory' ```` From 2e2848b2152771351ce53a4ffdf943146f503ce2 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Thu, 19 Jul 2018 17:31:46 +0200 Subject: [PATCH 17/34] [benchmark][Gardening] Arguments w/ optional value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added handling for arguments that don’t have a value (flags), or whose values are optional (they can be empty). This covers all the argument types currently used in Benchmark_O with the single `optionalArg` function. 
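A condensed standalone sketch of such a single generic setter is shown below, with a stand-in `Config` struct instead of the real `TestConfig` and a plain dictionary in place of the parsed arguments:

```swift
// Stand-ins for the example.
struct OptionError: Error { let message: String }

struct Config {
  var numSamples = 1
  var verbose = false
}

// One generic setter covers flags (empty value, defaultValue used),
// optional values, and required typed values.
func setOption<T>(
  _ options: [String: String],       // "--name": "value" pairs
  _ name: String,
  _ config: inout Config,
  _ property: WritableKeyPath<Config, T>,
  defaultValue: T? = nil,
  parser parse: (String) -> T? = { _ in nil }
) throws {
  guard let value = options[name] else { return }  // option not given at all
  guard !value.isEmpty || defaultValue != nil else {
    throw OptionError(message: "\(name) requires a value")
  }
  guard let typed = value.isEmpty ? defaultValue : parse(value) else {
    throw OptionError(message: "'\(value)' is not valid for \(name)")
  }
  config[keyPath: property] = typed
}

var config = Config()
try! setOption(["--verbose": ""], "--verbose", &config, \.verbose, defaultValue: true)
try! setOption(["--num-samples": "20"], "--num-samples", &config, \.numSamples) { Int($0) }
print(config)  // Config(numSamples: 20, verbose: true)
```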
--- benchmark/utils/DriverUtils.swift | 46 ++++++++++++------------------- 1 file changed, 17 insertions(+), 29 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 9ca8748aff584..0455ea9b6c006 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -67,6 +67,8 @@ struct TestConfig { /// The list of tests to run. var tests = [(index: String, info: BenchmarkInfo)]() + var action: TestAction = .run + mutating func processArguments() throws -> TestAction { let validOptions = [ "--iter-scale", "--num-samples", "--num-iters", @@ -79,33 +81,28 @@ struct TestConfig { filters = benchArgs.positionalArgs - if benchArgs.optionalArgsMap["--help"] != nil { - return .help(validOptions) - } - func optionalArg( _ name: String, _ property: WritableKeyPath, - parser parse: (String) throws -> T?) - throws { + defaultValue: T? = nil, + parser parse: (String) throws -> T? = { _ in nil } + ) throws { if let value = benchArgs.optionalArgsMap[name] { - guard !value.isEmpty else { throw ArgumentError.missingValue(name) } - guard let typedValue = try parse(value) else { + guard !value.isEmpty || defaultValue != nil + else { throw ArgumentError.missingValue(name) } + guard let typedValue = (value.isEmpty) ? defaultValue + : try parse(value) else { throw ArgumentError.invalidType( value: value, type: String(describing: T.self), argument: name) } self[keyPath: property] = typedValue } } + try optionalArg("--iter-scale", \.iterationScale) { Int($0) } try optionalArg("--num-iters", \.fixedNumIters) { UInt($0) } try optionalArg("--num-samples", \.numSamples) { Int($0) } - - if let _ = benchArgs.optionalArgsMap["--verbose"] { - verbose = true - print("Verbose") - } - + try optionalArg("--verbose", \.verbose, defaultValue: true) try optionalArg("--delim", \.delim) { $0 } func parseCategory(tag: String) throws -> BenchmarkCategory { @@ -121,25 +118,16 @@ struct TestConfig { // --tags=Array,Dictionary Set(try $0.split(separator: ",").map(String.init).map(parseCategory)) } - - if let x = benchArgs.optionalArgsMap["--skip-tags"] { - // if the --skip-tags parameter is specified, we need to ignore the - // default and start from a clean slate. - // If the parameter's value is empty, $0.split maps into [] - + try optionalArg("--skip-tags", \.skipTags, defaultValue: []) { // We support specifying multiple tags by splitting on comma, i.e.: // --skip-tags=Array,Set,unstable,skip - skipTags = Set( - try x.split(separator: ",").map(String.init).map(parseCategory)) + Set(try $0.split(separator: ",").map(String.init).map(parseCategory)) } - try optionalArg("--sleep", \.afterRunSleep) { Int($0) } + try optionalArg("--list", \.action, defaultValue: .listTests) + try optionalArg("--help", \.action, defaultValue: .help(validOptions)) - if let _ = benchArgs.optionalArgsMap["--list"] { - return .listTests - } - - return .run + return action } mutating func findTestsToRun() { @@ -448,7 +436,7 @@ public func main() { } } catch let error as ArgumentError { fflush(stdout) - fputs(error.description, stderr) + fputs("\(error)\n", stderr) fflush(stderr) exit(1) } catch { From 743c7ef15f94e5fe79c9a549e5aae4033ca17ab8 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Thu, 19 Jul 2018 22:00:30 +0200 Subject: [PATCH 18/34] [benchmark][Gardening] Grouped argument parsing * Extracted tag parser * Reordered all parsing of optional arguments to be grouped together. 
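For illustration, the extracted tag parser boils down to the following standalone sketch, with a reduced `Category` enum standing in for the real `BenchmarkCategory`:

```swift
// A reduced stand-in for BenchmarkCategory.
enum Category: String { case algorithm, validation, unstable, skip }

struct ParseError: Error, CustomStringConvertible {
  let value: String
  var description: String { return "'\(value)' is not a valid 'Category'" }
}

// Split a comma-separated list and map every element onto an enum case,
// throwing on anything unknown. An empty string yields an empty set.
func tags(_ list: String) throws -> Set<Category> {
  return Set(try list.split(separator: ",").map {
    guard let category = Category(rawValue: String($0)) else {
      throw ParseError(value: String($0))
    }
    return category
  })
}

print(try! tags("algorithm,validation"))  // a set containing .algorithm and .validation
print(try! tags(""))                      // [] (an empty list maps to the empty set)
```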
--- benchmark/utils/DriverUtils.swift | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 0455ea9b6c006..a21625cfa6f1b 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -99,30 +99,28 @@ struct TestConfig { } } - try optionalArg("--iter-scale", \.iterationScale) { Int($0) } - try optionalArg("--num-iters", \.fixedNumIters) { UInt($0) } - try optionalArg("--num-samples", \.numSamples) { Int($0) } - try optionalArg("--verbose", \.verbose, defaultValue: true) - try optionalArg("--delim", \.delim) { $0 } - - func parseCategory(tag: String) throws -> BenchmarkCategory { + func tag(tag: String) throws -> BenchmarkCategory { guard let category = BenchmarkCategory(rawValue: tag) else { throw ArgumentError.invalidType( value: tag, type: "BenchmarkCategory", argument: nil) } return category } - - try optionalArg("--tags", \.tags) { + func tags(tags: String) throws -> Set { // We support specifying multiple tags by splitting on comma, i.e.: // --tags=Array,Dictionary - Set(try $0.split(separator: ",").map(String.init).map(parseCategory)) - } - try optionalArg("--skip-tags", \.skipTags, defaultValue: []) { - // We support specifying multiple tags by splitting on comma, i.e.: // --skip-tags=Array,Set,unstable,skip - Set(try $0.split(separator: ",").map(String.init).map(parseCategory)) + return Set( + try tags.split(separator: ",").map(String.init).map(tag)) } + + try optionalArg("--iter-scale", \.iterationScale) { Int($0) } + try optionalArg("--num-iters", \.fixedNumIters) { UInt($0) } + try optionalArg("--num-samples", \.numSamples) { Int($0) } + try optionalArg("--verbose", \.verbose, defaultValue: true) + try optionalArg("--delim", \.delim) { $0 } + try optionalArg("--tags", \.tags, parser: tags) + try optionalArg("--skip-tags", \.skipTags, defaultValue: [], parser: tags) try optionalArg("--sleep", \.afterRunSleep) { Int($0) } try optionalArg("--list", \.action, defaultValue: .listTests) try optionalArg("--help", \.action, defaultValue: .help(validOptions)) From f2c4262ef09dd63dd954979fef1f6598243737f2 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 20 Jul 2018 05:58:34 +0200 Subject: [PATCH 19/34] [becnhmark][Gardening] Extracted checked function --- benchmark/utils/DriverUtils.swift | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index a21625cfa6f1b..95f8fbc790117 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -81,6 +81,16 @@ struct TestConfig { filters = benchArgs.positionalArgs + func checked( + _ parse: (String) throws -> T?, + _ value: String, + argument: String? = nil + ) throws -> T { + if let t = try parse(value) { return t } + throw ArgumentError.invalidType( + value: value, type: "\(T.self)", argument: argument) + } + func optionalArg( _ name: String, _ property: WritableKeyPath, @@ -90,28 +100,20 @@ struct TestConfig { if let value = benchArgs.optionalArgsMap[name] { guard !value.isEmpty || defaultValue != nil else { throw ArgumentError.missingValue(name) } - guard let typedValue = (value.isEmpty) ? 
defaultValue - : try parse(value) else { - throw ArgumentError.invalidType( - value: value, type: String(describing: T.self), argument: name) - } - self[keyPath: property] = typedValue - } - } - func tag(tag: String) throws -> BenchmarkCategory { - guard let category = BenchmarkCategory(rawValue: tag) else { - throw ArgumentError.invalidType( - value: tag, type: "BenchmarkCategory", argument: nil) + self[keyPath: property] = (value.isEmpty) + ? defaultValue! + : try checked(parse, value, argument:name) } - return category } + func tags(tags: String) throws -> Set { // We support specifying multiple tags by splitting on comma, i.e.: // --tags=Array,Dictionary // --skip-tags=Array,Set,unstable,skip return Set( - try tags.split(separator: ",").map(String.init).map(tag)) + try tags.split(separator: ",").map(String.init).map { + try checked({ BenchmarkCategory(rawValue: $0) }, $0) }) } try optionalArg("--iter-scale", \.iterationScale) { Int($0) } From b76c946fb9f53dd8fc8756882e783bcdbd5f4c40 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 20 Jul 2018 07:15:30 +0200 Subject: [PATCH 20/34] [benchmark][Gardening] `processArguments` is init Processing command line arguments is an integral part of `TestConfig` initialization. Make it explicit. --- benchmark/utils/DriverUtils.swift | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 95f8fbc790117..99dda5b0d423b 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -69,7 +69,7 @@ struct TestConfig { var action: TestAction = .run - mutating func processArguments() throws -> TestAction { + init() throws { let validOptions = [ "--iter-scale", "--num-samples", "--num-iters", "--verbose", "--delim", "--list", "--sleep", @@ -127,7 +127,6 @@ struct TestConfig { try optionalArg("--list", \.action, defaultValue: .listTests) try optionalArg("--help", \.action, defaultValue: .help(validOptions)) - return action } mutating func findTestsToRun() { @@ -410,9 +409,9 @@ func runBenchmarks(_ c: TestConfig) { } public func main() { - var config = TestConfig() do { - switch (try config.processArguments()) { + var config = try TestConfig() + switch (config.action) { case let .help(validOptions): print("Valid options:") for v in validOptions { From 50575b1cff85a76d09a00465bfb1e2cfc74d1c09 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 20 Jul 2018 12:57:21 +0200 Subject: [PATCH 21/34] [benchmark][Gardening] Introduce PartialTestConfig MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moving towards immutable TestConfig. The pattern used is inspired by the`PartialBenchmarkConfig` from [criterion.rs](https://github.com/japaric/criterion.rs/blob/master/src/benchmark.rs). The idea is to have a temporary mutable struct which collects the command line arguments and is filled by the “parser” (which will be extracted later). The partial configuration is used to initialize the immutable `TestConfig`. The optionality of the command line parameters is represented by the corresponding properties being `Optional`. (Accidentaly, all our arguments are optional… but that’s beside the point.) Null coalescing operator is used to fill in the defaults during initialization. For some reason, the compiler found `optionalArg` calls for `tags` and `skip-tags` ambiguous, when types changed to `Set?`, which was resolved by providing fully qualified key paths for them (`\PartialTestConfig.tags`). 
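As a rough standalone illustration of this pattern (with hypothetical names, not the actual driver types): every field of the scratch struct is `Optional`, so "not specified on the command line" is simply `nil`, and the immutable configuration supplies the defaults with the nil-coalescing operator.

````swift
/// Mutable scratch struct filled in by the argument parser.
struct PartialConfig {
    var delim: String?
    var numSamples: Int?
    var verbose: Bool?
}

/// Immutable configuration; defaults are filled in with `??`.
struct FinalConfig {
    let delim: String
    let numSamples: Int
    let verbose: Bool

    init(_ c: PartialConfig) {
        delim = c.delim ?? ","
        numSamples = c.numSamples ?? 1
        verbose = c.verbose ?? false
    }
}

var partial = PartialConfig()
partial.numSamples = 3           // as if "--num-samples=3" had been parsed
let config = FinalConfig(partial)
print(config.delim, config.numSamples, config.verbose)   // , 3 false
````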
--- benchmark/utils/DriverUtils.swift | 60 +++++++++++++++++++++++-------- 1 file changed, 45 insertions(+), 15 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 99dda5b0d423b..755062301e6f7 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -32,44 +32,56 @@ enum TestAction { struct TestConfig { /// The delimiter to use when printing output. - var delim: String = "," + let delim: String /// The filters applied to our test names. - var filters = [String]() + let filters: [String] /// The tags that we want to run - var tags = Set() + let tags: Set /// Tests tagged with any of these will not be executed - var skipTags: Set = [.unstable, .skip] + let skipTags: Set /// The scalar multiple of the amount of times a test should be run. This /// enables one to cause tests to run for N iterations longer than they /// normally would. This is useful when one wishes for a test to run for a /// longer amount of time to perform performance analysis on the test in /// instruments. - var iterationScale: Int = 1 + let iterationScale: Int /// If we are asked to have a fixed number of iterations, the number of fixed /// iterations. - var fixedNumIters: UInt = 0 + let fixedNumIters: UInt /// The number of samples we should take of each test. - var numSamples: Int = 1 + let numSamples: Int /// Is verbose output enabled? - var verbose: Bool = false + let verbose: Bool /// After we run the tests, should the harness sleep to allow for utilities /// like leaks that require a PID to run on the test harness. - var afterRunSleep: Int? + let afterRunSleep: Int? /// The list of tests to run. var tests = [(index: String, info: BenchmarkInfo)]() - var action: TestAction = .run + let action: TestAction init() throws { + + struct PartialTestConfig { + var delim: String? + var filters: [String]? + var tags, skipTags: Set? + var iterationScale, numSamples, afterRunSleep: Int? + var fixedNumIters: UInt? + var verbose: Bool? + var action: TestAction? + } + var c = PartialTestConfig() + let validOptions = [ "--iter-scale", "--num-samples", "--num-iters", "--verbose", "--delim", "--list", "--sleep", @@ -87,13 +99,19 @@ struct TestConfig { argument: String? = nil ) throws -> T { if let t = try parse(value) { return t } + var type = "\(T.self)" + if type.starts(with: "Optional<") { + let s = type.index(after: type.index(of:"<")!) + let e = type.index(before: type.endIndex) // ">" + type = String(type[s.. + } throw ArgumentError.invalidType( - value: value, type: "\(T.self)", argument: argument) + value: value, type: type, argument: argument) } func optionalArg( _ name: String, - _ property: WritableKeyPath, + _ property: WritableKeyPath, defaultValue: T? = nil, parser parse: (String) throws -> T? = { _ in nil } ) throws { @@ -101,7 +119,7 @@ struct TestConfig { guard !value.isEmpty || defaultValue != nil else { throw ArgumentError.missingValue(name) } - self[keyPath: property] = (value.isEmpty) + c[keyPath: property] = (value.isEmpty) ? defaultValue! 
: try checked(parse, value, argument:name) } @@ -121,12 +139,24 @@ struct TestConfig { try optionalArg("--num-samples", \.numSamples) { Int($0) } try optionalArg("--verbose", \.verbose, defaultValue: true) try optionalArg("--delim", \.delim) { $0 } - try optionalArg("--tags", \.tags, parser: tags) - try optionalArg("--skip-tags", \.skipTags, defaultValue: [], parser: tags) + try optionalArg("--tags", \PartialTestConfig.tags, parser: tags) + try optionalArg("--skip-tags", \PartialTestConfig.skipTags, + defaultValue: [], parser: tags) try optionalArg("--sleep", \.afterRunSleep) { Int($0) } try optionalArg("--list", \.action, defaultValue: .listTests) try optionalArg("--help", \.action, defaultValue: .help(validOptions)) + // Configure from the command line arguments, filling in the defaults. + delim = c.delim ?? "," + self.tags = c.tags ?? [] + skipTags = c.skipTags ?? [.unstable, .skip] + iterationScale = c.iterationScale ?? 1 + fixedNumIters = c.fixedNumIters ?? 0 + numSamples = c.numSamples ?? 1 + verbose = c.verbose ?? false + afterRunSleep = c.afterRunSleep + action = c.action ?? .run + // TODO: filters, tests } mutating func findTestsToRun() { From bfa198b9520e586ab39f4253a373a5bb871792dc Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 20 Jul 2018 14:51:39 +0200 Subject: [PATCH 22/34] [benchmark][Gardening] Immutable `TestConfig` The `TestConfig` is now completely immutable. Arguments that are not necessary for running benchmarks were moved inside the config initialization, which also subsumed the `printRunInfo` function for displaying the configuration details in verbose mode. --- benchmark/utils/DriverUtils.swift | 92 +++++++++++++++---------------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 755062301e6f7..53d638c586b69 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -34,15 +34,6 @@ struct TestConfig { /// The delimiter to use when printing output. let delim: String - /// The filters applied to our test names. - let filters: [String] - - /// The tags that we want to run - let tags: Set - - /// Tests tagged with any of these will not be executed - let skipTags: Set - /// The scalar multiple of the amount of times a test should be run. This /// enables one to cause tests to run for N iterations longer than they /// normally would. This is useful when one wishes for a test to run for a @@ -65,20 +56,20 @@ struct TestConfig { let afterRunSleep: Int? /// The list of tests to run. - var tests = [(index: String, info: BenchmarkInfo)]() + let tests: [(index: String, info: BenchmarkInfo)] let action: TestAction - init() throws { + init(_ registeredBenchmarks: [BenchmarkInfo]) throws { struct PartialTestConfig { var delim: String? - var filters: [String]? var tags, skipTags: Set? var iterationScale, numSamples, afterRunSleep: Int? var fixedNumIters: UInt? var verbose: Bool? var action: TestAction? + var filters: [String]? 
} var c = PartialTestConfig() @@ -91,8 +82,6 @@ struct TestConfig { throw ArgumentError.general("Failed to parse arguments") } - filters = benchArgs.positionalArgs - func checked( _ parse: (String) throws -> T?, _ value: String, @@ -134,6 +123,7 @@ struct TestConfig { try checked({ BenchmarkCategory(rawValue: $0) }, $0) }) } + // Parse command line arguments try optionalArg("--iter-scale", \.iterationScale) { Int($0) } try optionalArg("--num-iters", \.fixedNumIters) { UInt($0) } try optionalArg("--num-samples", \.numSamples) { Int($0) } @@ -146,32 +136,62 @@ struct TestConfig { try optionalArg("--list", \.action, defaultValue: .listTests) try optionalArg("--help", \.action, defaultValue: .help(validOptions)) + c.filters = benchArgs.positionalArgs + // Configure from the command line arguments, filling in the defaults. delim = c.delim ?? "," - self.tags = c.tags ?? [] - skipTags = c.skipTags ?? [.unstable, .skip] iterationScale = c.iterationScale ?? 1 fixedNumIters = c.fixedNumIters ?? 0 numSamples = c.numSamples ?? 1 verbose = c.verbose ?? false afterRunSleep = c.afterRunSleep action = c.action ?? .run - // TODO: filters, tests + tests = TestConfig.filterTests(registeredBenchmarks, + filters: c.filters ?? [], + tags: c.tags ?? [], + skipTags: c.skipTags ?? [.unstable, .skip]) + + if verbose { + let testList = tests.map({ $0.1.name }).joined(separator: ", ") + print(""" + --- CONFIG --- + NumSamples: \(numSamples) + Verbose: \(verbose) + IterScale: \(iterationScale) + FixedIters: \(fixedNumIters) + Tests Filter: \(c.filters ?? []) + Tests to run: \(testList) + + --- DATA ---\n + """) + } } - mutating func findTestsToRun() { - registeredBenchmarks.sort() + /// Returns the list of tests to run. + /// + /// - Parameters: + /// - registeredBenchmarks: List of all performance tests to be filtered. + /// - filters: List of explicitly specified tests to run. These can be + /// specified either by a test name or a test number. + /// - tags: Run tests tagged with all of these categories. + /// - skipTags: Don't run tests tagged with any of these categories. + /// - Returns: An array of test number and benchmark info tuples satisfying + /// specified filtering conditions. + static func filterTests( + _ registeredBenchmarks: [BenchmarkInfo], + filters: [String], + tags: Set, + skipTags: Set + ) -> [(index: String, info: BenchmarkInfo)] { let indices = Dictionary(uniqueKeysWithValues: - zip(registeredBenchmarks.map{ $0.name }, + zip(registeredBenchmarks.sorted().map{ $0.name }, (1...).lazy.map { String($0) } )) let benchmarkNamesOrIndices = Set(filters) - // needed so we don't capture an ivar of a mutable inout self. - let (_tags, _skipTags) = (tags, skipTags) - tests = registeredBenchmarks.filter { benchmark in + return registeredBenchmarks.filter { benchmark in if benchmarkNamesOrIndices.isEmpty { - return benchmark.tags.isSuperset(of: _tags) && - benchmark.tags.isDisjoint(with: _skipTags) + return benchmark.tags.isSuperset(of: tags) && + benchmark.tags.isDisjoint(with: skipTags) } else { return benchmarkNamesOrIndices.contains(benchmark.name) || benchmarkNamesOrIndices.contains(indices[benchmark.name]!) @@ -384,23 +404,6 @@ func runBench(_ test: BenchmarkInfo, _ c: TestConfig) -> BenchResults? 
{ maxRSS: UInt64(sampler.measureMemoryUsage())) } -func printRunInfo(_ c: TestConfig) { - if c.verbose { - print("--- CONFIG ---") - print("NumSamples: \(c.numSamples)") - print("Verbose: \(c.verbose)") - print("IterScale: \(c.iterationScale)") - if c.fixedNumIters != 0 { - print("FixedIters: \(c.fixedNumIters)") - } - print("Tests Filter: \(c.filters)") - print("Tests to run: ", terminator: "") - print(c.tests.map({ $0.1.name }).joined(separator: ", ")) - print("") - print("--- DATA ---") - } -} - /// Execute benchmarks and continuously report the measurement results. func runBenchmarks(_ c: TestConfig) { let withUnit = {$0 + "(us)"} @@ -440,7 +443,7 @@ func runBenchmarks(_ c: TestConfig) { public func main() { do { - var config = try TestConfig() + let config = try TestConfig(registeredBenchmarks) switch (config.action) { case let .help(validOptions): print("Valid options:") @@ -448,7 +451,6 @@ public func main() { print(" \(v)") } case .listTests: - config.findTestsToRun() print("#\(config.delim)Test\(config.delim)[Tags]") for (index, t) in config.tests { let testDescription = [String(index), t.name, t.tags.sorted().description] @@ -456,8 +458,6 @@ public func main() { print(testDescription) } case .run: - config.findTestsToRun() - printRunInfo(config) runBenchmarks(config) if let x = config.afterRunSleep { sleep(UInt32(x)) From 48d1558d077dc86d89f5126ccdc8082ba0804deb Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 20 Jul 2018 15:05:59 +0200 Subject: [PATCH 23/34] [benchmark][Gardening] Nested test filter funcs Refactored `filterTests` to use two nested functions that better reveal the underlying logic. --- benchmark/utils/DriverUtils.swift | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 53d638c586b69..8c492f7f3a6ff 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -184,19 +184,21 @@ struct TestConfig { skipTags: Set ) -> [(index: String, info: BenchmarkInfo)] { let indices = Dictionary(uniqueKeysWithValues: - zip(registeredBenchmarks.sorted().map{ $0.name }, + zip(registeredBenchmarks.sorted().map { $0.name }, (1...).lazy.map { String($0) } )) - let benchmarkNamesOrIndices = Set(filters) + let specifiedTests = Set(filters) - return registeredBenchmarks.filter { benchmark in - if benchmarkNamesOrIndices.isEmpty { - return benchmark.tags.isSuperset(of: tags) && - benchmark.tags.isDisjoint(with: skipTags) - } else { - return benchmarkNamesOrIndices.contains(benchmark.name) || - benchmarkNamesOrIndices.contains(indices[benchmark.name]!) - } - }.map { (index: indices[$0.name]!, info: $0) } + func byTags(b: BenchmarkInfo) -> Bool { + return b.tags.isSuperset(of: tags) && + b.tags.isDisjoint(with: skipTags) + } + func byNamesOrIndices(b: BenchmarkInfo) -> Bool { + return specifiedTests.contains(b.name) || + specifiedTests.contains(indices[b.name]!) + } // !! "All registeredBenchmarks have been assigned an index" + return registeredBenchmarks + .filter(filters.isEmpty ? byTags : byNamesOrIndices) + .map { (index: indices[$0.name]!, info: $0) } } } From 1841ddef5be1c830259969f17332259bf932feee Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 20 Jul 2018 22:44:15 +0200 Subject: [PATCH 24/34] [benchmark][Gardening] Unified argument parsing Renamed `optionalArg` to `parseArg` because it can now also handle the positional arguments. Now all the command line arguments are handled through this function. 
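A small self-contained sketch of that idea, again with made-up types rather than the real driver ones: passing `nil` for the option name routes the positional arguments into the target property, while a non-nil name goes through the usual typed parsing.

````swift
enum ArgError: Error { case invalid(value: String, argument: String) }

struct Parsed {
    var numIters = 0
    var tests = [String]()
}

func parseArg<T>(
    _ name: String?,                 // nil stands for the positional arguments
    _ property: WritableKeyPath<Parsed, T>,
    into result: inout Parsed,
    options: [String: String],
    positional: [String],
    parser: (String) -> T? = { _ in nil }
) throws {
    if let name = name, let value = options[name] {
        guard let typed = parser(value) else {
            throw ArgError.invalid(value: value, argument: name)
        }
        result[keyPath: property] = typed
    } else if name == nil {
        // No option name: the property receives the positional arguments.
        result[keyPath: property] = positional as! T
    }
}

var parsed = Parsed()
try parseArg("--num-iters", \.numIters, into: &parsed,
             options: ["--num-iters": "5"], positional: []) { Int($0) }
try parseArg(nil, \.tests, into: &parsed,
             options: [:], positional: ["Ackermann", "AngryPhonebook"])
print(parsed.numIters, parsed.tests)   // 5 ["Ackermann", "AngryPhonebook"]
````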
Minor naming improvement of the `filterTests` parameter. --- benchmark/utils/DriverUtils.swift | 44 +++++++++++++++---------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 8c492f7f3a6ff..4dd251e12352c 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -69,7 +69,7 @@ struct TestConfig { var fixedNumIters: UInt? var verbose: Bool? var action: TestAction? - var filters: [String]? + var tests: [String]? } var c = PartialTestConfig() @@ -98,19 +98,21 @@ struct TestConfig { value: value, type: type, argument: argument) } - func optionalArg( - _ name: String, + func parseArg( + _ name: String?, _ property: WritableKeyPath, defaultValue: T? = nil, parser parse: (String) throws -> T? = { _ in nil } ) throws { - if let value = benchArgs.optionalArgsMap[name] { + if let name = name, let value = benchArgs.optionalArgsMap[name] { guard !value.isEmpty || defaultValue != nil else { throw ArgumentError.missingValue(name) } c[keyPath: property] = (value.isEmpty) ? defaultValue! : try checked(parse, value, argument:name) + } else if name == nil { + c[keyPath: property] = benchArgs.positionalArgs as! T } } @@ -124,19 +126,18 @@ struct TestConfig { } // Parse command line arguments - try optionalArg("--iter-scale", \.iterationScale) { Int($0) } - try optionalArg("--num-iters", \.fixedNumIters) { UInt($0) } - try optionalArg("--num-samples", \.numSamples) { Int($0) } - try optionalArg("--verbose", \.verbose, defaultValue: true) - try optionalArg("--delim", \.delim) { $0 } - try optionalArg("--tags", \PartialTestConfig.tags, parser: tags) - try optionalArg("--skip-tags", \PartialTestConfig.skipTags, + try parseArg("--iter-scale", \.iterationScale) { Int($0) } + try parseArg("--num-iters", \.fixedNumIters) { UInt($0) } + try parseArg("--num-samples", \.numSamples) { Int($0) } + try parseArg("--verbose", \.verbose, defaultValue: true) + try parseArg("--delim", \.delim) { $0 } + try parseArg("--tags", \PartialTestConfig.tags, parser: tags) + try parseArg("--skip-tags", \PartialTestConfig.skipTags, defaultValue: [], parser: tags) - try optionalArg("--sleep", \.afterRunSleep) { Int($0) } - try optionalArg("--list", \.action, defaultValue: .listTests) - try optionalArg("--help", \.action, defaultValue: .help(validOptions)) - - c.filters = benchArgs.positionalArgs + try parseArg("--sleep", \.afterRunSleep) { Int($0) } + try parseArg("--list", \.action, defaultValue: .listTests) + try parseArg("--help", \.action, defaultValue: .help(validOptions)) + try parseArg(nil, \.tests) // positional arguments // Configure from the command line arguments, filling in the defaults. delim = c.delim ?? "," @@ -147,7 +148,7 @@ struct TestConfig { afterRunSleep = c.afterRunSleep action = c.action ?? .run tests = TestConfig.filterTests(registeredBenchmarks, - filters: c.filters ?? [], + specifiedTests: Set(c.tests ?? []), tags: c.tags ?? [], skipTags: c.skipTags ?? [.unstable, .skip]) @@ -159,7 +160,7 @@ struct TestConfig { Verbose: \(verbose) IterScale: \(iterationScale) FixedIters: \(fixedNumIters) - Tests Filter: \(c.filters ?? []) + Tests Filter: \(c.tests ?? []) Tests to run: \(testList) --- DATA ---\n @@ -171,7 +172,7 @@ struct TestConfig { /// /// - Parameters: /// - registeredBenchmarks: List of all performance tests to be filtered. - /// - filters: List of explicitly specified tests to run. These can be + /// - specifiedTests: List of explicitly specified tests to run. 
These can be /// specified either by a test name or a test number. /// - tags: Run tests tagged with all of these categories. /// - skipTags: Don't run tests tagged with any of these categories. @@ -179,14 +180,13 @@ struct TestConfig { /// specified filtering conditions. static func filterTests( _ registeredBenchmarks: [BenchmarkInfo], - filters: [String], + specifiedTests: Set, tags: Set, skipTags: Set ) -> [(index: String, info: BenchmarkInfo)] { let indices = Dictionary(uniqueKeysWithValues: zip(registeredBenchmarks.sorted().map { $0.name }, (1...).lazy.map { String($0) } )) - let specifiedTests = Set(filters) func byTags(b: BenchmarkInfo) -> Bool { return b.tags.isSuperset(of: tags) && @@ -197,7 +197,7 @@ struct TestConfig { specifiedTests.contains(indices[b.name]!) } // !! "All registeredBenchmarks have been assigned an index" return registeredBenchmarks - .filter(filters.isEmpty ? byTags : byNamesOrIndices) + .filter(specifiedTests.isEmpty ? byTags : byNamesOrIndices) .map { (index: indices[$0.name]!, info: $0) } } } From 9f902866ea488987048e90fe7c4f5c58b65c8327 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Fri, 20 Jul 2018 22:52:04 +0200 Subject: [PATCH 25/34] [benchmark][Gardening] Extracted `ArgumentParser` Moved the argument parsing logic into new class `ArgumentParser`. The `checked` function is also moved to the ArgParse.swift, next to the parser. --- benchmark/utils/ArgParse.swift | 52 ++++++++++++++++++++++++ benchmark/utils/DriverUtils.swift | 66 +++++++------------------------ 2 files changed, 67 insertions(+), 51 deletions(-) diff --git a/benchmark/utils/ArgParse.swift b/benchmark/utils/ArgParse.swift index 01240bbf20d2c..41faeec3c33e5 100644 --- a/benchmark/utils/ArgParse.swift +++ b/benchmark/utils/ArgParse.swift @@ -97,3 +97,55 @@ public func parseArgs(_ validOptions: [String]? = nil) return Arguments(progName, positionalArgs, optionalArgsMap) } + +/// Returns the argument value converted to the type T using the parser. +/// If the parser cannot create the value of specified type, throw an invalid +/// type argument error. +func checked( + _ parse: (String) throws -> T?, + _ value: String, + argument: String? = nil +) throws -> T { + if let t = try parse(value) { return t } + var type = "\(T.self)" + if type.starts(with: "Optional<") { + let s = type.index(after: type.index(of:"<")!) + let e = type.index(before: type.endIndex) // ">" + type = String(type[s.. + } + throw ArgumentError.invalidType( + value: value, type: type, argument: argument) +} + +class ArgumentParser { + var result: U + let validOptions: [String] + private let benchArgs: Arguments + + init(into result: U, validOptions: [String]) throws { + self.result = result + self.validOptions = validOptions + guard let benchArgs = parseArgs(validOptions) else { + throw ArgumentError.general("Failed to parse arguments") + } + self.benchArgs = benchArgs + } + + func parseArg( + _ name: String?, + _ property: WritableKeyPath, + defaultValue: T? = nil, + parser parse: (String) throws -> T? = { _ in nil } + ) throws { + if let name = name, let value = benchArgs.optionalArgsMap[name] { + guard !value.isEmpty || defaultValue != nil + else { throw ArgumentError.missingValue(name) } + + result[keyPath: property] = (value.isEmpty) + ? defaultValue! + : try checked(parse, value, argument:name) + } else if name == nil { + result[keyPath: property] = benchArgs.positionalArgs as! 
T + } + } +} diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 4dd251e12352c..336ce0bf27169 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -71,50 +71,12 @@ struct TestConfig { var action: TestAction? var tests: [String]? } - var c = PartialTestConfig() - let validOptions = [ + let p = try ArgumentParser(into: PartialTestConfig(), validOptions: [ "--iter-scale", "--num-samples", "--num-iters", "--verbose", "--delim", "--list", "--sleep", "--tags", "--skip-tags", "--help" - ] - guard let benchArgs = parseArgs(validOptions) else { - throw ArgumentError.general("Failed to parse arguments") - } - - func checked( - _ parse: (String) throws -> T?, - _ value: String, - argument: String? = nil - ) throws -> T { - if let t = try parse(value) { return t } - var type = "\(T.self)" - if type.starts(with: "Optional<") { - let s = type.index(after: type.index(of:"<")!) - let e = type.index(before: type.endIndex) // ">" - type = String(type[s.. - } - throw ArgumentError.invalidType( - value: value, type: type, argument: argument) - } - - func parseArg( - _ name: String?, - _ property: WritableKeyPath, - defaultValue: T? = nil, - parser parse: (String) throws -> T? = { _ in nil } - ) throws { - if let name = name, let value = benchArgs.optionalArgsMap[name] { - guard !value.isEmpty || defaultValue != nil - else { throw ArgumentError.missingValue(name) } - - c[keyPath: property] = (value.isEmpty) - ? defaultValue! - : try checked(parse, value, argument:name) - } else if name == nil { - c[keyPath: property] = benchArgs.positionalArgs as! T - } - } + ]) func tags(tags: String) throws -> Set { // We support specifying multiple tags by splitting on comma, i.e.: @@ -126,18 +88,20 @@ struct TestConfig { } // Parse command line arguments - try parseArg("--iter-scale", \.iterationScale) { Int($0) } - try parseArg("--num-iters", \.fixedNumIters) { UInt($0) } - try parseArg("--num-samples", \.numSamples) { Int($0) } - try parseArg("--verbose", \.verbose, defaultValue: true) - try parseArg("--delim", \.delim) { $0 } - try parseArg("--tags", \PartialTestConfig.tags, parser: tags) - try parseArg("--skip-tags", \PartialTestConfig.skipTags, + try p.parseArg("--iter-scale", \.iterationScale) { Int($0) } + try p.parseArg("--num-iters", \.fixedNumIters) { UInt($0) } + try p.parseArg("--num-samples", \.numSamples) { Int($0) } + try p.parseArg("--verbose", \.verbose, defaultValue: true) + try p.parseArg("--delim", \.delim) { $0 } + try p.parseArg("--tags", \PartialTestConfig.tags, parser: tags) + try p.parseArg("--skip-tags", \PartialTestConfig.skipTags, defaultValue: [], parser: tags) - try parseArg("--sleep", \.afterRunSleep) { Int($0) } - try parseArg("--list", \.action, defaultValue: .listTests) - try parseArg("--help", \.action, defaultValue: .help(validOptions)) - try parseArg(nil, \.tests) // positional arguments + try p.parseArg("--sleep", \.afterRunSleep) { Int($0) } + try p.parseArg("--list", \.action, defaultValue: .listTests) + try p.parseArg("--help", \.action, defaultValue: .help(p.validOptions)) + try p.parseArg(nil, \.tests) // positional arguments + + let c = p.result // Configure from the command line arguments, filling in the defaults. delim = c.delim ?? 
"," From e5cbfccd2272fb662099913d8516719bd1dbe7dd Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Sat, 21 Jul 2018 01:10:36 +0200 Subject: [PATCH 26/34] [benchmark][Gardening] Declarative ArgumentParser The `ArgumentParser` now has a configuration phase which specifies the supported arguments and their handling. The configured parser is then executed using the `parse` method which returns the parsed result. --- benchmark/utils/ArgParse.swift | 38 ++++++++++++++++++++++--------- benchmark/utils/DriverUtils.swift | 33 ++++++++++++--------------- 2 files changed, 41 insertions(+), 30 deletions(-) diff --git a/benchmark/utils/ArgParse.swift b/benchmark/utils/ArgParse.swift index 41faeec3c33e5..bc10f892a05a6 100644 --- a/benchmark/utils/ArgParse.swift +++ b/benchmark/utils/ArgParse.swift @@ -119,23 +119,39 @@ func checked( class ArgumentParser { var result: U - let validOptions: [String] - private let benchArgs: Arguments + var validOptions: [String] = [] + private var benchArgs: Arguments! + private var parsers: [() throws -> ()] = [] - init(into result: U, validOptions: [String]) throws { + init(into result: U) throws { self.result = result - self.validOptions = validOptions - guard let benchArgs = parseArgs(validOptions) else { - throw ArgumentError.general("Failed to parse arguments") - } - self.benchArgs = benchArgs } - func parseArg( + func parse() throws -> U { + guard let benchArgs = parseArgs(validOptions) else { + throw ArgumentError.general("Failed to parse arguments") + } + self.benchArgs = benchArgs + try parsers.forEach { try $0() } // parse all arguments + return result + } + + func addArgument( _ name: String?, _ property: WritableKeyPath, defaultValue: T? = nil, - parser parse: (String) throws -> T? = { _ in nil } + parser: @escaping (String) throws -> T? = { _ in nil } + ) { + if let name = name { validOptions.append(name) } + parsers.append( + { try self.parseArgument(name, property, defaultValue, parser) }) + } + + func parseArgument( + _ name: String?, + _ property: WritableKeyPath, + _ defaultValue: T?, + _ parse: (String) throws -> T? ) throws { if let name = name, let value = benchArgs.optionalArgsMap[name] { guard !value.isEmpty || defaultValue != nil @@ -143,7 +159,7 @@ class ArgumentParser { result[keyPath: property] = (value.isEmpty) ? defaultValue! - : try checked(parse, value, argument:name) + : try checked(parse, value, argument: name) } else if name == nil { result[keyPath: property] = benchArgs.positionalArgs as! T } diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 336ce0bf27169..33aa36a66e26d 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -72,12 +72,6 @@ struct TestConfig { var tests: [String]? 
} - let p = try ArgumentParser(into: PartialTestConfig(), validOptions: [ - "--iter-scale", "--num-samples", "--num-iters", - "--verbose", "--delim", "--list", "--sleep", - "--tags", "--skip-tags", "--help" - ]) - func tags(tags: String) throws -> Set { // We support specifying multiple tags by splitting on comma, i.e.: // --tags=Array,Dictionary @@ -87,21 +81,22 @@ struct TestConfig { try checked({ BenchmarkCategory(rawValue: $0) }, $0) }) } - // Parse command line arguments - try p.parseArg("--iter-scale", \.iterationScale) { Int($0) } - try p.parseArg("--num-iters", \.fixedNumIters) { UInt($0) } - try p.parseArg("--num-samples", \.numSamples) { Int($0) } - try p.parseArg("--verbose", \.verbose, defaultValue: true) - try p.parseArg("--delim", \.delim) { $0 } - try p.parseArg("--tags", \PartialTestConfig.tags, parser: tags) - try p.parseArg("--skip-tags", \PartialTestConfig.skipTags, + // Configure the command line argument parser + let p = try ArgumentParser(into: PartialTestConfig()) + p.addArgument("--iter-scale", \.iterationScale) { Int($0) } + p.addArgument("--num-iters", \.fixedNumIters) { UInt($0) } + p.addArgument("--num-samples", \.numSamples) { Int($0) } + p.addArgument("--verbose", \.verbose, defaultValue: true) + p.addArgument("--delim", \.delim) { $0 } + p.addArgument("--tags", \PartialTestConfig.tags, parser: tags) + p.addArgument("--skip-tags", \PartialTestConfig.skipTags, defaultValue: [], parser: tags) - try p.parseArg("--sleep", \.afterRunSleep) { Int($0) } - try p.parseArg("--list", \.action, defaultValue: .listTests) - try p.parseArg("--help", \.action, defaultValue: .help(p.validOptions)) - try p.parseArg(nil, \.tests) // positional arguments + p.addArgument("--sleep", \.afterRunSleep) { Int($0) } + p.addArgument("--list", \.action, defaultValue: .listTests) + p.addArgument("--help", \.action, defaultValue: .help(p.validOptions)) + p.addArgument(nil, \.tests) // positional arguments - let c = p.result + let c = try p.parse() // Configure from the command line arguments, filling in the defaults. delim = c.delim ?? "," From f674dd5cf054d57732f47313a31889721e2e2105 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Sat, 21 Jul 2018 13:16:08 +0200 Subject: [PATCH 27/34] [benchmark][Gardening] Handle --help inside parser Moved the printing of help message inside the `ArgumentParser`, which has all the necessary info. Added test that checks the `--help` option. --- benchmark/utils/ArgParse.swift | 30 ++++++++++++++++++++++++------ benchmark/utils/DriverUtils.swift | 7 ------- test/benchmark/Benchmark_O.test.md | 11 +++++++++++ 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/benchmark/utils/ArgParse.swift b/benchmark/utils/ArgParse.swift index bc10f892a05a6..2cd9c4d605ea9 100644 --- a/benchmark/utils/ArgParse.swift +++ b/benchmark/utils/ArgParse.swift @@ -117,14 +117,33 @@ func checked( value: value, type: type, argument: argument) } +struct Argument { + let name: String? + let apply: () throws -> () +} + class ArgumentParser { var result: U - var validOptions: [String] = [] + var validOptions: [String] { + return arguments.compactMap { $0.name } + } private var benchArgs: Arguments! 
- private var parsers: [() throws -> ()] = [] + private var arguments: [Argument] = [] init(into result: U) throws { - self.result = result + self.result = result + self.arguments += [ + Argument(name: "--help", apply: printUsage) + ] + } + + func printUsage() { + guard let _ = benchArgs.optionalArgsMap["--help"] else { return } + print("Valid options:") + for v in validOptions { + print(" \(v)") + } + exit(0) } func parse() throws -> U { @@ -132,7 +151,7 @@ class ArgumentParser { throw ArgumentError.general("Failed to parse arguments") } self.benchArgs = benchArgs - try parsers.forEach { try $0() } // parse all arguments + try arguments.forEach { try $0.apply() } // parse all arguments return result } @@ -142,8 +161,7 @@ class ArgumentParser { defaultValue: T? = nil, parser: @escaping (String) throws -> T? = { _ in nil } ) { - if let name = name { validOptions.append(name) } - parsers.append( + arguments.append(Argument(name: name) { try self.parseArgument(name, property, defaultValue, parser) }) } diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 33aa36a66e26d..ebe8b877b0b66 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -27,7 +27,6 @@ public var registeredBenchmarks: [BenchmarkInfo] = [] enum TestAction { case run case listTests - case help([String]) } struct TestConfig { @@ -93,7 +92,6 @@ struct TestConfig { defaultValue: [], parser: tags) p.addArgument("--sleep", \.afterRunSleep) { Int($0) } p.addArgument("--list", \.action, defaultValue: .listTests) - p.addArgument("--help", \.action, defaultValue: .help(p.validOptions)) p.addArgument(nil, \.tests) // positional arguments let c = try p.parse() @@ -406,11 +404,6 @@ public func main() { do { let config = try TestConfig(registeredBenchmarks) switch (config.action) { - case let .help(validOptions): - print("Valid options:") - for v in validOptions { - print(" \(v)") - } case .listTests: print("#\(config.delim)Test\(config.delim)[Tags]") for (index, t) in config.tests { diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index 4927f09a104fa..a8292166e58fd 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -171,3 +171,14 @@ RUN: 2>&1 | %FileCheck %s --check-prefix BADSKIPTAG BADSKIPTAG: 'bogus' is not a valid 'BenchmarkCategory' ```` + +## Usage + +```` +RUN: %Benchmark_O --help | %FileCheck %s --check-prefix OPTIONS +OPTIONS: Valid options: +OPTIONS: --verbose +OPTIONS: --delim +OPTIONS: --tags +OPTIONS: --list +```` From 50c79c5972c9a37b4baef5f057c4616834bd2fc9 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Sat, 21 Jul 2018 13:30:32 +0200 Subject: [PATCH 28/34] [benchmark][Gardening] Local parser error handling In case of invalid command line arguments, there is no reasonable recovery, the `ArgumentParser` can exit the program itself. It is therefore no longer necessary to propagate the `ArgumentError`s outside of the parser. --- benchmark/utils/ArgParse.swift | 23 +++++++++++------ benchmark/utils/DriverUtils.swift | 41 ++++++++++++------------------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/benchmark/utils/ArgParse.swift b/benchmark/utils/ArgParse.swift index 2cd9c4d605ea9..ca1c12cb6aef1 100644 --- a/benchmark/utils/ArgParse.swift +++ b/benchmark/utils/ArgParse.swift @@ -130,7 +130,7 @@ class ArgumentParser { private var benchArgs: Arguments! 
private var arguments: [Argument] = [] - init(into result: U) throws { + init(into result: U) { self.result = result self.arguments += [ Argument(name: "--help", apply: printUsage) @@ -146,13 +146,22 @@ class ArgumentParser { exit(0) } - func parse() throws -> U { - guard let benchArgs = parseArgs(validOptions) else { - throw ArgumentError.general("Failed to parse arguments") + func parse() -> U { + do { + guard let benchArgs = parseArgs(validOptions) else { + throw ArgumentError.general("Failed to parse arguments") + } + self.benchArgs = benchArgs + try arguments.forEach { try $0.apply() } // parse all arguments + return result + } catch let error as ArgumentError { + fflush(stdout) + fputs("\(error)\n", stderr) + fflush(stderr) + exit(1) + } catch { + fatalError("\(error)") } - self.benchArgs = benchArgs - try arguments.forEach { try $0.apply() } // parse all arguments - return result } func addArgument( diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index ebe8b877b0b66..68d70a839e382 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -59,7 +59,7 @@ struct TestConfig { let action: TestAction - init(_ registeredBenchmarks: [BenchmarkInfo]) throws { + init(_ registeredBenchmarks: [BenchmarkInfo]) { struct PartialTestConfig { var delim: String? @@ -81,7 +81,7 @@ struct TestConfig { } // Configure the command line argument parser - let p = try ArgumentParser(into: PartialTestConfig()) + let p = ArgumentParser(into: PartialTestConfig()) p.addArgument("--iter-scale", \.iterationScale) { Int($0) } p.addArgument("--num-iters", \.fixedNumIters) { UInt($0) } p.addArgument("--num-samples", \.numSamples) { Int($0) } @@ -94,7 +94,7 @@ struct TestConfig { p.addArgument("--list", \.action, defaultValue: .listTests) p.addArgument(nil, \.tests) // positional arguments - let c = try p.parse() + let c = p.parse() // Configure from the command line arguments, filling in the defaults. delim = c.delim ?? "," @@ -401,28 +401,19 @@ func runBenchmarks(_ c: TestConfig) { } public func main() { - do { - let config = try TestConfig(registeredBenchmarks) - switch (config.action) { - case .listTests: - print("#\(config.delim)Test\(config.delim)[Tags]") - for (index, t) in config.tests { - let testDescription = [String(index), t.name, t.tags.sorted().description] - .joined(separator: config.delim) - print(testDescription) - } - case .run: - runBenchmarks(config) - if let x = config.afterRunSleep { - sleep(UInt32(x)) - } + let config = TestConfig(registeredBenchmarks) + switch (config.action) { + case .listTests: + print("#\(config.delim)Test\(config.delim)[Tags]") + for (index, t) in config.tests { + let testDescription = [String(index), t.name, t.tags.sorted().description] + .joined(separator: config.delim) + print(testDescription) + } + case .run: + runBenchmarks(config) + if let x = config.afterRunSleep { + sleep(UInt32(x)) } - } catch let error as ArgumentError { - fflush(stdout) - fputs("\(error)\n", stderr) - fflush(stderr) - exit(1) - } catch { - fatalError("\(error)") } } From 0c0ed3d35df6511ef9edbd3402926f0cdf7d3cb3 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Sat, 21 Jul 2018 15:51:43 +0200 Subject: [PATCH 29/34] [benchmark][Gardening] Moved parseArgs into parser The `parseArgs` funtion is now a private method on `ArgumentParser`. Removed `Arguments` struct and moved the `Argument` as a nested struct into the parser. Adjusted error messages and the corresponding checks. 
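For reference, a rough sketch (not the parser's actual code) of the option-splitting convention being implemented here: `--name=value` and bare `--name` forms populate the option map, anything else is positional, and a literal `--` passes everything that follows through as positional arguments.

````swift
/// Splits raw command line arguments into an option map and positional args.
func splitArguments(_ args: [String]) -> (options: [String: String], positional: [String]) {
    var options: [String: String] = [:]
    var positional: [String] = []
    var passThrough = false
    for arg in args {
        if arg == "--" { passThrough = true; continue }
        guard !passThrough, arg.hasPrefix("-") else {
            positional.append(arg)
            continue
        }
        // "--name=value" keeps everything after the first "=" as the value;
        // a bare "--name" gets an empty value.
        let parts = arg.split(separator: "=", maxSplits: 1).map(String.init)
        options[parts[0]] = parts.count > 1 ? parts[1] : ""
    }
    return (options, positional)
}

let (options, tests) = splitArguments(["--num-iters=3", "--verbose", "Ackermann"])
print(options["--num-iters"]!, tests)   // 3 ["Ackermann"]
// options == ["--num-iters": "3", "--verbose": ""]
````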
--- benchmark/utils/ArgParse.swift | 168 ++++++++++++++--------------- test/benchmark/Benchmark_O.test.md | 21 ++-- 2 files changed, 89 insertions(+), 100 deletions(-) diff --git a/benchmark/utils/ArgParse.swift b/benchmark/utils/ArgParse.swift index ca1c12cb6aef1..5a4efc8616b6a 100644 --- a/benchmark/utils/ArgParse.swift +++ b/benchmark/utils/ArgParse.swift @@ -12,90 +12,25 @@ import Foundation -public struct Arguments { - public var progName: String - public var positionalArgs: [String] - public var optionalArgsMap: [String : String] - - init(_ pName: String, _ posArgs: [String], _ optArgsMap: [String : String]) { - progName = pName - positionalArgs = posArgs - optionalArgsMap = optArgsMap - } -} - enum ArgumentError: Error { case missingValue(String) case invalidType(value: String, type: String, argument: String?) - case general(String) + case unsupportedArgument(String) } extension ArgumentError: CustomStringConvertible { public var description: String { switch self { case let .missingValue(key): - return "\(key) requires a value" //"Missing value for `\(key)`" + return "missing value for '\(key)'" case let .invalidType(value, type, argument): return (argument == nil) ? "'\(value)' is not a valid '\(type)'" : "'\(value)' is not a valid '\(type)' for '\(argument!)'" - case let .general(description): - return "\(description)" - } - } -} - -/// Using CommandLine.arguments, returns an Arguments struct describing -/// the arguments to this program. If we fail to parse arguments, we -/// return nil. -/// -/// We assume that optional switch args are of the form: -/// -/// --opt-name[=opt-value] -/// -opt-name[=opt-value] -/// -/// with opt-name and opt-value not containing any '=' signs. Any -/// other option passed in is assumed to be a positional argument. -public func parseArgs(_ validOptions: [String]? = nil) - -> Arguments? { - let progName = CommandLine.arguments[0] - var positionalArgs = [String]() - var optionalArgsMap = [String : String]() - - // For each argument we are passed... - var passThroughArgs = false - for arg in CommandLine.arguments[1..( value: value, type: type, argument: argument) } -struct Argument { - let name: String? - let apply: () throws -> () -} - +/// Parser that converts the program's command line arguments to typed values +/// according to the parser's configuration, storing them in the provided +/// instance of a value-holding type. class ArgumentParser { - var result: U - var validOptions: [String] { + private var result: U + private var validOptions: [String] { return arguments.compactMap { $0.name } } - private var benchArgs: Arguments! private var arguments: [Argument] = [] + private let progName = CommandLine.arguments[0] + private var positionalArgs = [String]() + private var optionalArgsMap = [String : String]() + + // Argument holds the name of the command line parameter and the value + // processing closure used to convert it into given type and storing it + // in the parsing result. + struct Argument { + let name: String? + let apply: () throws -> () + } + /// ArgumentParser is initialized with an instance of a type that holds + /// the results of the parsing of the individual command line arguments. 
init(into result: U) { self.result = result self.arguments += [ @@ -137,8 +82,8 @@ class ArgumentParser { ] } - func printUsage() { - guard let _ = benchArgs.optionalArgsMap["--help"] else { return } + private func printUsage() { + guard let _ = self.optionalArgsMap["--help"] else { return } print("Valid options:") for v in validOptions { print(" \(v)") @@ -146,17 +91,17 @@ class ArgumentParser { exit(0) } - func parse() -> U { + /// Parses the command line arguments, returning the result filled with + /// specified argument values or report errors and exit the program if + /// the parsing fails. + public func parse() -> U { do { - guard let benchArgs = parseArgs(validOptions) else { - throw ArgumentError.general("Failed to parse arguments") - } - self.benchArgs = benchArgs + try parseArgs() try arguments.forEach { try $0.apply() } // parse all arguments return result } catch let error as ArgumentError { fflush(stdout) - fputs("\(error)\n", stderr) + fputs("error: \(error)\n", stderr) fflush(stderr) exit(1) } catch { @@ -164,7 +109,52 @@ class ArgumentParser { } } - func addArgument( + /// Using CommandLine.arguments, parses the structure of optional and + /// positional arguments of this program. Failure to parse arguments + /// throws correspondding ArgumentError. + /// + /// We assume that optional switch args are of the form: + /// + /// --opt-name[=opt-value] + /// -opt-name[=opt-value] + /// + /// with opt-name and opt-value not containing any '=' signs. Any + /// other option passed in is assumed to be a positional argument. + private func parseArgs() throws { + + // For each argument we are passed... + var passThroughArgs = false + for arg in CommandLine.arguments[1..( _ name: String?, _ property: WritableKeyPath, defaultValue: T? = nil, @@ -174,13 +164,13 @@ class ArgumentParser { { try self.parseArgument(name, property, defaultValue, parser) }) } - func parseArgument( + private func parseArgument( _ name: String?, _ property: WritableKeyPath, _ defaultValue: T?, _ parse: (String) throws -> T? ) throws { - if let name = name, let value = benchArgs.optionalArgsMap[name] { + if let name = name, let value = optionalArgsMap[name] { guard !value.isEmpty || defaultValue != nil else { throw ArgumentError.missingValue(name) } @@ -188,7 +178,7 @@ class ArgumentParser { ? defaultValue! : try checked(parse, value, argument: name) } else if name == nil { - result[keyPath: property] = benchArgs.positionalArgs as! T + result[keyPath: property] = positionalArgs as! T } } } diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index a8292166e58fd..8437bbf0624fd 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -131,44 +131,43 @@ LOGVERBOSE-LABEL: Running AngryPhonebook for 2 samples. 
```` RUN: not %Benchmark_O --bogus 2>&1 \ RUN: | %FileCheck %s --check-prefix ARGPARSE -ARGPARSE: Invalid option: --bogus -ARGPARSE: Failed to parse arguments +ARGPARSE: error: unsupported argument '--bogus' RUN: not %Benchmark_O --iter-scale \ RUN: 2>&1 | %FileCheck %s --check-prefix NOVALUE -NOVALUE: --iter-scale requires a value +NOVALUE: error: missing value for '--iter-scale' RUN: not %Benchmark_O --iter-scale= \ RUN: 2>&1 | %FileCheck %s --check-prefix EMPTYVAL -EMPTYVAL: --iter-scale requires a value +EMPTYVAL: error: missing value for '--iter-scale' RUN: not %Benchmark_O --iter-scale=NaN \ RUN: 2>&1 | %FileCheck %s --check-prefix NANVALUE -NANVALUE: 'NaN' is not a valid 'Int' for '--iter-scale' +NANVALUE: error: 'NaN' is not a valid 'Int' for '--iter-scale' RUN: not %Benchmark_O --num-iters \ RUN: 2>&1 | %FileCheck %s --check-prefix NUMITERS -NUMITERS: --num-iters requires a value +NUMITERS: error: missing value for '--num-iters' RUN: not %Benchmark_O --num-samples \ RUN: 2>&1 | %FileCheck %s --check-prefix NUMSAMPLES -NUMSAMPLES: --num-samples requires a value +NUMSAMPLES: error: missing value for '--num-samples' RUN: not %Benchmark_O --sleep \ RUN: 2>&1 | %FileCheck %s --check-prefix SLEEP -SLEEP: --sleep requires a value +SLEEP: error: missing value for '--sleep' RUN: not %Benchmark_O --delim \ RUN: 2>&1 | %FileCheck %s --check-prefix DELIM -DELIM: --delim requires a value +DELIM: error: missing value for '--delim' RUN: not %Benchmark_O --tags=bogus \ RUN: 2>&1 | %FileCheck %s --check-prefix BADTAG -BADTAG: 'bogus' is not a valid 'BenchmarkCategory' +BADTAG: error: 'bogus' is not a valid 'BenchmarkCategory' RUN: not %Benchmark_O --skip-tags=bogus \ RUN: 2>&1 | %FileCheck %s --check-prefix BADSKIPTAG -BADSKIPTAG: 'bogus' is not a valid 'BenchmarkCategory' +BADSKIPTAG: error: 'bogus' is not a valid 'BenchmarkCategory' ```` From f89d41ad3bbc918839a5795506b8ef2d10c95414 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Sat, 21 Jul 2018 22:58:44 +0200 Subject: [PATCH 30/34] [benchmark] Print detailed argument help The `--help` option now prints standard usage description with documentaion for all arguments: ```` $ Benchmark_O --help usage: Benchmark_O [--argument=VALUE] [TEST [TEST ...]] positional arguments: TEST name or number of the benchmark to measure optional arguments: --help show this help message and exit --num-samples number of samples to take per benchmark; default: 1 --num-iters number of iterations averaged in the sample; default: auto-scaled to measure for 1 second --iter-scale number of seconds used for num-iters calculation default: 1 --verbose increase output verbosity --delim value delimiter used for log output; default: , --tags run tests matching all the specified categories --skip-tags don't run tests matching any of the specified categories; default: unstable,skip --sleep number of seconds to sleep after benchmarking --list don't run the tests, just log the list of test numbers, names and tags (respects specified filters) ```` --- benchmark/utils/ArgParse.swift | 43 ++++++++++++++++++++++------ benchmark/utils/DriverUtils.swift | 46 ++++++++++++++++++------------ test/benchmark/Benchmark_O.test.md | 5 +++- 3 files changed, 67 insertions(+), 27 deletions(-) diff --git a/benchmark/utils/ArgParse.swift b/benchmark/utils/ArgParse.swift index 5a4efc8616b6a..cf505984120c1 100644 --- a/benchmark/utils/ArgParse.swift +++ b/benchmark/utils/ArgParse.swift @@ -61,7 +61,12 @@ class ArgumentParser { return arguments.compactMap { $0.name } } private var arguments: 
[Argument] = [] - private let progName = CommandLine.arguments[0] + private let programName: String = { + // Strip full path from the program name. + let r = CommandLine.arguments[0].reversed() + let ss = r[r.startIndex ..< (r.index(of:"/") ?? r.endIndex)] + return String(ss.reversed()) + }() private var positionalArgs = [String]() private var optionalArgsMap = [String : String]() @@ -70,6 +75,7 @@ class ArgumentParser { // in the parsing result. struct Argument { let name: String? + let help: String? let apply: () throws -> () } @@ -78,16 +84,36 @@ class ArgumentParser { init(into result: U) { self.result = result self.arguments += [ - Argument(name: "--help", apply: printUsage) + Argument(name: "--help", help: "show this help message and exit", + apply: printUsage) ] } private func printUsage() { - guard let _ = self.optionalArgsMap["--help"] else { return } - print("Valid options:") - for v in validOptions { - print(" \(v)") - } + guard let _ = optionalArgsMap["--help"] else { return } + let space = " " + let maxLength = arguments.compactMap({ $0.name?.count }).max()! + let padded = { (s: String) in + " \(s)\(String(repeating:space, count: maxLength - s.count)) " } + let f: (String, String) -> String = { + "\(padded($0))\($1)" + .split(separator: "\n") + .joined(separator: "\n" + padded("")) + } + let positional = f("TEST", "name or number of the benchmark to measure") + let optional = arguments.filter { $0.name != nil } + .map { f($0.name!, $0.help ?? "") } + .joined(separator: "\n") + print( + """ + usage: \(programName) [--argument=VALUE] [TEST [TEST ...]] + + positional arguments: + \(positional) + + optional arguments: + \(optional) + """) exit(0) } @@ -158,9 +184,10 @@ class ArgumentParser { _ name: String?, _ property: WritableKeyPath, defaultValue: T? = nil, + help: String? = nil, parser: @escaping (String) throws -> T? = { _ in nil } ) { - arguments.append(Argument(name: name) + arguments.append(Argument(name: name, help: help) { try self.parseArgument(name, property, defaultValue, parser) }) } diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift index 68d70a839e382..b74b62b08b6a8 100644 --- a/benchmark/utils/DriverUtils.swift +++ b/benchmark/utils/DriverUtils.swift @@ -30,7 +30,6 @@ enum TestAction { } struct TestConfig { - /// The delimiter to use when printing output. let delim: String /// The scalar multiple of the amount of times a test should be run. This @@ -39,15 +38,8 @@ struct TestConfig { /// longer amount of time to perform performance analysis on the test in /// instruments. let iterationScale: Int - - /// If we are asked to have a fixed number of iterations, the number of fixed - /// iterations. let fixedNumIters: UInt - - /// The number of samples we should take of each test. let numSamples: Int - - /// Is verbose output enabled? 
let verbose: Bool /// After we run the tests, should the harness sleep to allow for utilities @@ -82,16 +74,34 @@ struct TestConfig { // Configure the command line argument parser let p = ArgumentParser(into: PartialTestConfig()) - p.addArgument("--iter-scale", \.iterationScale) { Int($0) } - p.addArgument("--num-iters", \.fixedNumIters) { UInt($0) } - p.addArgument("--num-samples", \.numSamples) { Int($0) } - p.addArgument("--verbose", \.verbose, defaultValue: true) - p.addArgument("--delim", \.delim) { $0 } - p.addArgument("--tags", \PartialTestConfig.tags, parser: tags) - p.addArgument("--skip-tags", \PartialTestConfig.skipTags, - defaultValue: [], parser: tags) - p.addArgument("--sleep", \.afterRunSleep) { Int($0) } - p.addArgument("--list", \.action, defaultValue: .listTests) + p.addArgument("--num-samples", \.numSamples, + help: "number of samples to take per benchmark; default: 1", + parser: { Int($0) }) + p.addArgument("--num-iters", \.fixedNumIters, + help: "number of iterations averaged in the sample;\n" + + "default: auto-scaled to measure for 1 second", + parser: { UInt($0) }) + p.addArgument("--iter-scale", \.iterationScale, + help: "number of seconds used for num-iters calculation\n" + + "default: 1", parser: { Int($0) }) + p.addArgument("--verbose", \.verbose, defaultValue: true, + help: "increase output verbosity") + p.addArgument("--delim", \.delim, + help:"value delimiter used for log output; default: ,", + parser: { $0 }) + p.addArgument("--tags", \PartialTestConfig.tags, + help: "run tests matching all the specified categories", + parser: tags) + p.addArgument("--skip-tags", \PartialTestConfig.skipTags, defaultValue: [], + help: "don't run tests matching any of the specified\n" + + "categories; default: unstable,skip", + parser: tags) + p.addArgument("--sleep", \.afterRunSleep, + help: "number of seconds to sleep after benchmarking", + parser: { Int($0) }) + p.addArgument("--list", \.action, defaultValue: .listTests, + help: "don't run the tests, just log the list of test \n" + + "numbers, names and tags (respects specified filters)") p.addArgument(nil, \.tests) // positional arguments let c = p.parse() diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md index 8437bbf0624fd..f9be27d71aa6d 100644 --- a/test/benchmark/Benchmark_O.test.md +++ b/test/benchmark/Benchmark_O.test.md @@ -175,7 +175,10 @@ BADSKIPTAG: error: 'bogus' is not a valid 'BenchmarkCategory' ```` RUN: %Benchmark_O --help | %FileCheck %s --check-prefix OPTIONS -OPTIONS: Valid options: +OPTIONS: usage: Benchmark_O [--argument=VALUE] [TEST [TEST ...]] +OPTIONS: optional arguments: +OPTIONS: --help +OPTIONS-SAME: show this help message and exit OPTIONS: --verbose OPTIONS: --delim OPTIONS: --tags From 19613733a4a0cfc21b4df537ba731b2e96d92dc6 Mon Sep 17 00:00:00 2001 From: Pavol Vaskovic Date: Sat, 21 Jul 2018 23:23:06 +0200 Subject: [PATCH 31/34] [benchmark] Log the MAX_RSS only w/ --memory flag Printing of the MAX_RSS is now hidden behind the optional `--memory` flag. 
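For context, the MAX_RSS column is a delta of the maximum resident set size sampled before and after the run. A minimal standalone sketch of that kind of measurement with `getrusage(2)` follows; it is illustrative only, the driver's own implementation lives in its `SampleRunner`.

````swift
import Darwin   // macOS; on Linux this would come from Glibc instead

/// Maximum resident set size of the current process so far
/// (reported in bytes on macOS).
func maxResidentSetSize() -> Int {
    var usage = rusage()
    getrusage(RUSAGE_SELF, &usage)
    return Int(usage.ru_maxrss)
}

let baseline = maxResidentSetSize()
let buffer = [UInt8](repeating: 1, count: 16 << 20)   // allocate and touch ~16 MB
let delta = maxResidentSetSize() - baseline
print("MAX_RSS delta: \(delta) bytes (allocated \(buffer.count) bytes)")
````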
---
 benchmark/utils/DriverUtils.swift  | 12 +++++++++---
 test/benchmark/Benchmark_O.test.md | 12 ++++++++----
 2 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift
index b74b62b08b6a8..9856cf9d7d18e 100644
--- a/benchmark/utils/DriverUtils.swift
+++ b/benchmark/utils/DriverUtils.swift
@@ -41,6 +41,7 @@ struct TestConfig {
   let fixedNumIters: UInt
   let numSamples: Int
   let verbose: Bool
+  let logMemory: Bool
 
   /// After we run the tests, should the harness sleep to allow for utilities
   /// like leaks that require a PID to run on the test harness.
@@ -59,6 +60,7 @@ struct TestConfig {
       var iterationScale, numSamples, afterRunSleep: Int?
       var fixedNumIters: UInt?
       var verbose: Bool?
+      var logMemory: Bool?
       var action: TestAction?
       var tests: [String]?
     }
@@ -86,6 +88,8 @@ struct TestConfig {
                         "default: 1", parser: { Int($0) })
     p.addArgument("--verbose", \.verbose, defaultValue: true,
                   help: "increase output verbosity")
+    p.addArgument("--memory", \.logMemory, defaultValue: true,
+                  help: "log the change in maximum resident set size (MAX_RSS)")
     p.addArgument("--delim", \.delim,
                   help:"value delimiter used for log output; default: ,",
                   parser: { $0 })
@@ -112,6 +116,7 @@ struct TestConfig {
     fixedNumIters = c.fixedNumIters ?? 0
     numSamples = c.numSamples ?? 1
     verbose = c.verbose ?? false
+    logMemory = c.logMemory ?? false
     afterRunSleep = c.afterRunSleep
     action = c.action ?? .run
     tests = TestConfig.filterTests(registeredBenchmarks,
@@ -125,6 +130,7 @@ struct TestConfig {
     --- CONFIG ---
     NumSamples: \(numSamples)
     Verbose: \(verbose)
+    LogMemory: \(logMemory)
     IterScale: \(iterationScale)
     FixedIters: \(fixedNumIters)
     Tests Filter: \(c.tests ?? [])
@@ -379,7 +385,7 @@ func runBenchmarks(_ c: TestConfig) {
   let header = (
     ["#", "TEST", "SAMPLES"]
     + ["MIN", "MAX", "MEAN", "SD", "MEDIAN"].map(withUnit)
-    + ["MAX_RSS(B)"]
+    + (c.logMemory ? ["MAX_RSS(B)"] : [])
   ).joined(separator: c.delim)
   print(header)
 
@@ -387,8 +393,8 @@ func runBenchmarks(_ c: TestConfig) {
 
   func report(_ index: String, _ t: BenchmarkInfo, results: BenchResults?) {
     func values(r: BenchResults) -> [String] {
-      return [r.sampleCount, r.min, r.max, r.mean, r.sd, r.median, r.maxRSS]
-        .map { String($0) }
+      return ([r.sampleCount, r.min, r.max, r.mean, r.sd, r.median] +
+              (c.logMemory ? [r.maxRSS] : [])).map { String($0) }
     }
     let benchmarkStats = (
       [index, t.name] + (results.map(values) ?? ["Unsupported"])
diff --git a/test/benchmark/Benchmark_O.test.md b/test/benchmark/Benchmark_O.test.md
index f9be27d71aa6d..8098035a04492 100644
--- a/test/benchmark/Benchmark_O.test.md
+++ b/test/benchmark/Benchmark_O.test.md
@@ -95,24 +95,27 @@ RUN: %Benchmark_O AngryPhonebook --num-iters=1 \
 RUN: | %FileCheck %s --check-prefix NUMITERS1 \
 RUN: --check-prefix LOGHEADER \
 RUN: --check-prefix LOGBENCH
-LOGHEADER-LABEL: #,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us),MAX_RSS(B)
+LOGHEADER-LABEL: #,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us)
 LOGBENCH: {{[0-9]+}},
 NUMITERS1: AngryPhonebook,1
 NUMITERS1-NOT: 0,0,0,0,0
-LOGBENCH-SAME: ,{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}}
+LOGBENCH-SAME: ,{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}}
 ````
 
 ### Verbose Mode
 
 ````
-RUN: %Benchmark_O 1 Ackermann 1 AngryPhonebook --verbose --num-samples=2 \
+RUN: %Benchmark_O 1 Ackermann 1 AngryPhonebook \
+RUN: --verbose --num-samples=2 --memory \
 RUN: | %FileCheck %s --check-prefix RUNJUSTONCE \
 RUN: --check-prefix CONFIG \
 RUN: --check-prefix LOGVERBOSE \
-RUN: --check-prefix MEASUREENV
+RUN: --check-prefix MEASUREENV \
+RUN: --check-prefix LOGMEMORY
 CONFIG: NumSamples: 2
 CONFIG: Tests Filter: ["1", "Ackermann", "1", "AngryPhonebook"]
 CONFIG: Tests to run: Ackermann, AngryPhonebook
+LOGMEMORY: #,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us),MAX_RSS(B)
 LOGVERBOSE-LABEL: Running Ackermann for 2 samples.
 LOGVERBOSE: Measuring with scale {{[0-9]+}}.
 LOGVERBOSE-NEXT: Sample 0,{{[0-9]+}}
@@ -123,6 +126,7 @@ MEASUREENV: ICS {{[0-9]+}} - {{[0-9]+}} = {{[0-9]+}}
 MEASUREENV: VCS {{[0-9]+}} - {{[0-9]+}} = {{[0-9]+}}
 RUNJUSTONCE-LABEL: 1,Ackermann
 RUNJUSTONCE-NOT: 1,Ackermann
+LOGMEMORY: ,{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}},{{[0-9]+}}
 LOGVERBOSE-LABEL: Running AngryPhonebook for 2 samples.
 ````

From df5ccf3e262192bfbd5606fa738d60f1c29c6c2b Mon Sep 17 00:00:00 2001
From: Pavol Vaskovic
Date: Mon, 23 Jul 2018 08:42:57 +0200
Subject: [PATCH 32/34] [benchmark][Gardening] Better naming and comments

* Restored property doc comments on `TestConfig`
* Better name for func `usage` is `getResourceUtilization`
---
 benchmark/utils/DriverUtils.swift | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift
index 9856cf9d7d18e..e5dac996ffb20 100644
--- a/benchmark/utils/DriverUtils.swift
+++ b/benchmark/utils/DriverUtils.swift
@@ -30,6 +30,7 @@ enum TestAction {
 }
 
 struct TestConfig {
+  /// The delimiter to use when printing output.
   let delim: String
 
   /// The scalar multiple of the amount of times a test should be run. This
@@ -38,9 +39,18 @@ struct TestConfig {
   /// longer amount of time to perform performance analysis on the test in
   /// instruments.
   let iterationScale: Int
+
+  /// If we are asked to have a fixed number of iterations, the number of fixed
+  /// iterations.
   let fixedNumIters: UInt
+
+  /// The number of samples we should take of each test.
   let numSamples: Int
+
+  /// Is verbose output enabled?
   let verbose: Bool
+
+  // Should we log the test's memory usage?
   let logMemory: Bool
 
   /// After we run the tests, should the harness sleep to allow for utilities
@@ -256,20 +266,20 @@ class Timer {
 
 class SampleRunner {
   let timer = Timer()
-  let baseline = SampleRunner.usage()
+  let baseline = SampleRunner.getResourceUtilization()
   let c: TestConfig
 
   init(_ config: TestConfig) {
     self.c = config
   }
 
-  private static func usage() -> rusage {
+  private static func getResourceUtilization() -> rusage {
     var u = rusage(); getrusage(RUSAGE_SELF, &u); return u
   }
 
   /// Returns maximum resident set size (MAX_RSS) delta in bytes
   func measureMemoryUsage() -> Int {
-    var current = SampleRunner.usage()
+    var current = SampleRunner.getResourceUtilization()
     let maxRSS = current.ru_maxrss - baseline.ru_maxrss
 
     if c.verbose {

From 4ed3dcfcc56a8872d39092769f241df77825f37a Mon Sep 17 00:00:00 2001
From: Pavol Vaskovic
Date: Mon, 23 Jul 2018 11:23:30 +0200
Subject: [PATCH 33/34] [benchmark][Gardening] `--sample-time` renaming

Sample time is a better name for what was previously called `iter-scale`.
---
 benchmark/utils/DriverUtils.swift  | 34 ++++++++++++++++++------------
 test/benchmark/Benchmark_O.test.md | 12 +++++------
 2 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/benchmark/utils/DriverUtils.swift b/benchmark/utils/DriverUtils.swift
index e5dac996ffb20..2b55098355dfd 100644
--- a/benchmark/utils/DriverUtils.swift
+++ b/benchmark/utils/DriverUtils.swift
@@ -33,15 +33,17 @@ struct TestConfig {
   /// The delimiter to use when printing output.
   let delim: String
 
-  /// The scalar multiple of the amount of times a test should be run. This
-  /// enables one to cause tests to run for N iterations longer than they
-  /// normally would. This is useful when one wishes for a test to run for a
+  /// Duration of the test measurement in seconds.
+  ///
+  /// Used to compute the number of iterations, if no fixed amount is specified.
+  /// This is useful when one wishes for a test to run for a
   /// longer amount of time to perform performance analysis on the test in
   /// instruments.
-  let iterationScale: Int
+  let sampleTime: Double
 
   /// If we are asked to have a fixed number of iterations, the number of fixed
-  /// iterations.
+  /// iterations. The default value of 0 means: automatically compute the
+  /// number of iterations to measure the test for a specified sample time.
   let fixedNumIters: UInt
 
   /// The number of samples we should take of each test.
@@ -67,14 +69,16 @@ struct TestConfig {
     struct PartialTestConfig {
       var delim: String?
       var tags, skipTags: Set<BenchmarkCategory>?
-      var iterationScale, numSamples, afterRunSleep: Int?
+      var numSamples, afterRunSleep: Int?
       var fixedNumIters: UInt?
+      var sampleTime: Double?
       var verbose: Bool?
       var logMemory: Bool?
      var action: TestAction?
       var tests: [String]?
     }
 
+    // Custom value type parsers
     func tags(tags: String) throws -> Set<BenchmarkCategory> {
       // We support specifying multiple tags by splitting on comma, i.e.:
       // --tags=Array,Dictionary
@@ -83,6 +87,9 @@ struct TestConfig {
       try tags.split(separator: ",").map(String.init).map {
         try checked({ BenchmarkCategory(rawValue: $0) }, $0) })
     }
+    func finititeDouble(value: String) -> Double? {
+      return Double(value).flatMap { $0.isFinite ? $0 : nil }
+    }
 
     // Configure the command line argument parser
    let p = ArgumentParser(into: PartialTestConfig())
@@ -91,11 +98,11 @@ struct TestConfig {
                   parser: { Int($0) })
     p.addArgument("--num-iters", \.fixedNumIters,
                   help: "number of iterations averaged in the sample;\n" +
-                        "default: auto-scaled to measure for 1 second",
+                        "default: auto-scaled to measure for `sample-time`",
                   parser: { UInt($0) })
-    p.addArgument("--iter-scale", \.iterationScale,
-                  help: "number of seconds used for num-iters calculation\n" +
-                        "default: 1", parser: { Int($0) })
+    p.addArgument("--sample-time", \.sampleTime,
+                  help: "duration of test measurement in seconds\ndefault: 1",
+                  parser: finititeDouble)
     p.addArgument("--verbose", \.verbose, defaultValue: true,
                   help: "increase output verbosity")
     p.addArgument("--memory", \.logMemory, defaultValue: true,
@@ -122,7 +129,7 @@ struct TestConfig {
 
     // Configure from the command line arguments, filling in the defaults.
     delim = c.delim ?? ","
-    iterationScale = c.iterationScale ?? 1
+    sampleTime = c.sampleTime ?? 1.0
     fixedNumIters = c.fixedNumIters ?? 0
     numSamples = c.numSamples ?? 1
     verbose = c.verbose ?? false
@@ -141,7 +148,7 @@ struct TestConfig {
     NumSamples: \(numSamples)
     Verbose: \(verbose)
     LogMemory: \(logMemory)
-    IterScale: \(iterationScale)
+    SampleTime: \(sampleTime)
     FixedIters: \(fixedNumIters)
     Tests Filter: \(c.tests ?? [])
     Tests to run: \(testList)
@@ -336,7 +343,8 @@ func runBench(_ test: BenchmarkInfo, _ c: TestConfig) -> BenchResults? {
   let sampler = SampleRunner(c)
  for s in 0..&1 \
 RUN: | %FileCheck %s --check-prefix ARGPARSE
 ARGPARSE: error: unsupported argument '--bogus'
 
-RUN: not %Benchmark_O --iter-scale \
+RUN: not %Benchmark_O --sample-time \
 RUN: 2>&1 | %FileCheck %s --check-prefix NOVALUE
-NOVALUE: error: missing value for '--iter-scale'
+NOVALUE: error: missing value for '--sample-time'
 
-RUN: not %Benchmark_O --iter-scale= \
+RUN: not %Benchmark_O --sample-time= \
 RUN: 2>&1 | %FileCheck %s --check-prefix EMPTYVAL
-EMPTYVAL: error: missing value for '--iter-scale'
+EMPTYVAL: error: missing value for '--sample-time'
 
-RUN: not %Benchmark_O --iter-scale=NaN \
+RUN: not %Benchmark_O --sample-time=NaN \
 RUN: 2>&1 | %FileCheck %s --check-prefix NANVALUE
-NANVALUE: error: 'NaN' is not a valid 'Int' for '--iter-scale'
+NANVALUE: error: 'NaN' is not a valid 'Double' for '--sample-time'
 
 RUN: not %Benchmark_O --num-iters \
 RUN: 2>&1 | %FileCheck %s --check-prefix NUMITERS

From 362f925e3740c18cecaaad414b264c8e2330b495 Mon Sep 17 00:00:00 2001
From: Pavol Vaskovic
Date: Mon, 23 Jul 2018 11:08:34 +0200
Subject: [PATCH 34/34] [benchmark][Gardening] Docs and error handling

* Improved documentation.
* Corrected `fflush` usage in `parse` error handling.
* Removed unused `passThroughArgs`.
---
 benchmark/utils/ArgParse.swift    | 71 +++++++++++++++++++++----------
 benchmark/utils/DriverUtils.swift |  4 +-
 2 files changed, 51 insertions(+), 24 deletions(-)

diff --git a/benchmark/utils/ArgParse.swift b/benchmark/utils/ArgParse.swift
index cf505984120c1..8796e02d6871b 100644
--- a/benchmark/utils/ArgParse.swift
+++ b/benchmark/utils/ArgParse.swift
@@ -33,9 +33,11 @@ extension ArgumentError: CustomStringConvertible {
   }
 }
 
-/// Returns the argument value converted to the type T using the parser.
-/// If the parser cannot create the value of specified type, throw an invalid
-/// type argument error.
+/// Type-checked parsing of the argument value.
+///
+/// - Returns: Typed value of the argument converted using the `parse` function.
+///
+/// - Throws: `ArgumentError.invalidType` when the conversion fails.
 func checked<T>(
   _ parse: (String) throws -> T?,
   _ value: String,
@@ -46,7 +48,7 @@ func checked<T>(
   if type.starts(with: "Optional<") {
     let s = type.index(after: type.index(of:"<")!)
     let e = type.index(before: type.endIndex) // ">"
-    type = String(type[s..<e]) // strip Optional< >
+    type = String(type[s ..< e]) // strip Optional< >
   }
   throw ArgumentError.invalidType(
     value: value, type: type, argument: argument)
@@ -70,9 +72,13 @@ class ArgumentParser<U> {
   private var positionalArgs = [String]()
   private var optionalArgsMap = [String : String]()
 
-  // Argument holds the name of the command line parameter and the value
-  // processing closure used to convert it into given type and storing it
-  // in the parsing result.
+  /// Argument holds the name of the command line parameter, its help
+  /// description and a rule that's applied to process it.
+  ///
+  /// The rule is typically a value processing closure used to convert it
+  /// into the given type and store it in the parsing result.
+  ///
+  /// See also: addArgument, parseArgument
   struct Argument {
     let name: String?
     let help: String?
@@ -122,45 +128,41 @@ class ArgumentParser<U> {
   /// the parsing fails.
   public func parse() -> U {
     do {
-      try parseArgs()
-      try arguments.forEach { try $0.apply() } // parse all arguments
+      try parseArgs() // parse the argument syntax
+      try arguments.forEach { try $0.apply() } // type-check and store values
       return result
     } catch let error as ArgumentError {
-      fflush(stdout)
       fputs("error: \(error)\n", stderr)
-      fflush(stderr)
       exit(1)
     } catch {
+      fflush(stdout)
       fatalError("\(error)")
     }
   }
 
   /// Using CommandLine.arguments, parses the structure of optional and
-  /// positional arguments of this program. Failure to parse arguments
-  /// throws correspondding ArgumentError.
+  /// positional arguments of this program.
   ///
   /// We assume that optional switch args are of the form:
   ///
-  ///   --opt-name[=opt-value]
-  ///   -opt-name[=opt-value]
+  ///     --opt-name[=opt-value]
+  ///     -opt-name[=opt-value]
   ///
-  /// with opt-name and opt-value not containing any '=' signs. Any
+  /// with `opt-name` and `opt-value` not containing any '=' signs. Any
   /// other option passed in is assumed to be a positional argument.
+  ///
+  /// - Throws: `ArgumentError.unsupportedArgument` on failure to parse
+  ///   the supported argument syntax.
   private func parseArgs() throws {
 
     // For each argument we are passed...
-    var passThroughArgs = false
    for arg in CommandLine.arguments[1.. Double? {
+    func finiteDouble(value: String) -> Double? {
       return Double(value).flatMap { $0.isFinite ? $0 : nil }
     }
 
@@ -102,7 +102,7 @@ struct TestConfig {
                   parser: { UInt($0) })
     p.addArgument("--sample-time", \.sampleTime,
                   help: "duration of test measurement in seconds\ndefault: 1",
-                  parser: finititeDouble)
+                  parser: finiteDouble)
     p.addArgument("--verbose", \.verbose, defaultValue: true,
                   help: "increase output verbosity")
     p.addArgument("--memory", \.logMemory, defaultValue: true,