diff --git a/benchmark/scripts/Benchmark_Driver b/benchmark/scripts/Benchmark_Driver
index 4faa9a1465e4c..fe70d7d4108ab 100755
--- a/benchmark/scripts/Benchmark_Driver
+++ b/benchmark/scripts/Benchmark_Driver
@@ -435,6 +435,19 @@ class BenchmarkDoctor(object):
                 "Decrease the workload of '%s' by a factor of %d (%d), to be "
                 "less than %d μs.", name, factor(2), factor(10), threshold)
 
+        threshold = 20
+        if runtime < threshold:
+            log = (BenchmarkDoctor.log_runtime.error if runtime == 0 else
+                   BenchmarkDoctor.log_runtime.warning)
+            log("'%s' execution took %d μs.", name, runtime)
+
+            BenchmarkDoctor.log_runtime.info(
+                "Ensure the workload of '%s' has a properly measurable size"
+                " (runtime > %d μs) and is not eliminated by the compiler (use"
+                " `blackHole` function if necessary)." if runtime == 0 else
+                "Increase the workload of '%s' to be more than %d μs.",
+                name, threshold)
+
     @staticmethod
     def _setup_overhead(measurements):
         select = BenchmarkDoctor._select
@@ -442,7 +455,8 @@ class BenchmarkDoctor(object):
             [[result.samples.min for result in i_series] for i_series in
              [select(measurements, num_iters=i) for i in [1, 2]]]]
 
-        setup = int(round(2.0 * (ti1 - ti2)))
+        setup = (int(round(2.0 * (ti1 - ti2))) if ti2 > 20  # limit of accuracy
+                 else 0)
         ratio = (setup / ti1) if ti1 > 0 else 0
         return (setup, ratio)
 
diff --git a/benchmark/scripts/test_Benchmark_Driver.py b/benchmark/scripts/test_Benchmark_Driver.py
index 6906074d75d1b..51cc95b29afbf 100644
--- a/benchmark/scripts/test_Benchmark_Driver.py
+++ b/benchmark/scripts/test_Benchmark_Driver.py
@@ -667,7 +667,7 @@ def test_benchmark_name_is_at_most_40_chars_long(self):
             self.logs['info'])
 
     def test_benchmark_runtime_range(self):
-        """Optimized benchmark should run in less then 1000 μs.
+        """Optimized benchmark should have runtime between 20 μs and 1000 μs.
 
         Even on calm machine, benchmark with runtime of 2500 μs has 1:4 chance
         of being interrupted in the middle of measurement due to elapsed 10 ms
@@ -687,6 +687,8 @@ def measurements(name, runtime):
 
         with captured_output() as (out, _):
            doctor = BenchmarkDoctor(self.args, BenchmarkDriverMock([]))
+            doctor.analyze(measurements('Sylph', 0))
+            doctor.analyze(measurements('Unicorn', 3))
             doctor.analyze(measurements('Cheetah', 200))
             doctor.analyze(measurements('Hare', 1001))
             doctor.analyze(measurements('Tortoise', 500000))
@@ -697,6 +699,18 @@ def measurements(name, runtime):
         self.assertIn('runtime: ', output)
         self.assertNotIn('Cheetah', output)
 
+        self.assert_contains(["'Sylph' execution took 0 μs."],
+                             self.logs['error'])
+        self.assert_contains(
+            ["Ensure the workload of 'Sylph' has a properly measurable size"
+             " (runtime > 20 μs) and is not eliminated by the compiler (use "
+             "`blackHole` function if necessary)."],
+            self.logs['info'])
+        self.assert_contains(["'Unicorn' execution took 3 μs."],
+                             self.logs['warning'])
+        self.assert_contains(
+            ["Increase the workload of 'Unicorn' to be more than 20 μs."],
+            self.logs['info'])
         self.assert_contains(["'Hare' execution took at least 1001 μs."],
                              self.logs['warning'])
         self.assert_contains(
@@ -728,11 +742,18 @@ def test_benchmark_has_no_significant_setup_overhead(self):
                 'SO O i2a': _PTR(min=67), 'SO O i2b': _PTR(min=68)})
             doctor.analyze({'name': 'Zero', 'Zero O i1a': _PTR(min=0),
                             'Zero O i2a': _PTR(min=0)})
+            doctor.analyze({
+                'name': 'LOA',  # Limit of Accuracy
+                # Impossible to detect overhead:
+                # Even 1μs change in 20μs runtime is 5%.
+                'LOA O i1a': _PTR(min=21),
+                'LOA O i2a': _PTR(min=20)})
         output = out.getvalue()
 
         self.assertIn('runtime: ', output)
         self.assertNotIn('NoOverhead', output)
         self.assertNotIn('ZeroRuntime', output)
+        self.assertNotIn('LOA', output)
         self.assert_contains(
             ["'SO' has setup overhead of 4 μs (5.8%)."],
             self.logs['error'])
diff --git a/benchmark/single-source/SetTests.swift b/benchmark/single-source/SetTests.swift
index b317ff815ff2d..caa64a6f23118 100644
--- a/benchmark/single-source/SetTests.swift
+++ b/benchmark/single-source/SetTests.swift
@@ -58,10 +58,15 @@ let setQ: Set = {
 public let SetTests = [
   // Mnemonic: number after name is percentage of common elements in input sets.
   BenchmarkInfo(
-    name: "Set.Empty.IsSubsetInt0",
+    name: "Set.isSubset.Empty.Int",
     runFunction: { n in run_SetIsSubsetInt(setE, setAB, true, 5000 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setE, setAB]) }),
+  BenchmarkInfo(
+    name: "Set.isSubset.Int.Empty",
+    runFunction: { n in run_SetIsSubsetInt(setAB, setE, false, 5000 * n) },
+    tags: [.validation, .api, .Set],
+    setUpFunction: { blackHole([setAB, setE]) }),
   BenchmarkInfo(
     name: "SetIsSubsetInt0",
     runFunction: { n in run_SetIsSubsetInt(setAB, setCD, false, 5000 * n) },
@@ -94,46 +99,56 @@ public let SetTests = [
     setUpFunction: { blackHole([setP, setQ]) }),
 
   BenchmarkInfo(
-    name: "Set.Empty.IsDisjointInt0",
-    runFunction: { n in run_SetIsDisjointInt(setE, setAB, true, 50 * n) },
+    name: "Set.isDisjoint.Empty.Int",
+    runFunction: { n in run_SetIsDisjointInt(setE, setAB, true, 5000 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setE, setAB]) }),
   BenchmarkInfo(
-    name: "Set.Empty.IsDisjointBox0",
-    runFunction: { n in run_SetIsDisjointBox(setOE, setOAB, true, 50 * n) },
+    name: "Set.isDisjoint.Int.Empty",
+    runFunction: { n in run_SetIsDisjointInt(setAB, setE, true, 5000 * n) },
+    tags: [.validation, .api, .Set],
+    setUpFunction: { blackHole([setAB, setE]) }),
+  BenchmarkInfo(
+    name: "Set.isDisjoint.Empty.Box",
+    runFunction: { n in run_SetIsDisjointBox(setOE, setOAB, true, 5000 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setOE, setOAB]) }),
   BenchmarkInfo(
-    name: "SetIsDisjointInt0",
+    name: "Set.isDisjoint.Box.Empty",
+    runFunction: { n in run_SetIsDisjointBox(setOAB, setOE, true, 5000 * n) },
+    tags: [.validation, .api, .Set],
+    setUpFunction: { blackHole([setOAB, setOE]) }),
+  BenchmarkInfo(
+    name: "Set.isDisjoint.Int0",
     runFunction: { n in run_SetIsDisjointInt(setAB, setCD, true, 50 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setAB, setCD]) }),
   BenchmarkInfo(
-    name: "SetIsDisjointBox0",
+    name: "Set.isDisjoint.Box0",
     runFunction: { n in run_SetIsDisjointBox(setOAB, setOCD, true, 50 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setOAB, setOCD]) }),
   BenchmarkInfo(
-    name: "SetIsDisjointInt25",
-    runFunction: { n in run_SetIsDisjointInt(setB, setAB, false, 50 * n) },
+    name: "Set.isDisjoint.Int25",
+    runFunction: { n in run_SetIsDisjointInt(setB, setAB, false, 5000 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setB, setAB]) }),
   BenchmarkInfo(
-    name: "SetIsDisjointBox25",
-    runFunction: { n in run_SetIsDisjointBox(setOB, setOAB, false, 50 * n) },
+    name: "Set.isDisjoint.Box25",
+    runFunction: { n in run_SetIsDisjointBox(setOB, setOAB, false, 5000 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setOB, setOAB]) }),
   BenchmarkInfo(
-    name: "SetIsDisjointInt50",
-    runFunction: { n in run_SetIsDisjointInt(setY, setXY, false, 50 * n) },
+    name: "Set.isDisjoint.Int50",
+    runFunction: { n in run_SetIsDisjointInt(setY, setXY, false, 5000 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setY, setXY]) }),
   BenchmarkInfo(
-    name: "SetIsDisjointInt100",
-    runFunction: { n in run_SetIsDisjointInt(setP, setQ, false, 50 * n) },
+    name: "Set.isDisjoint.Int100",
+    runFunction: { n in run_SetIsDisjointInt(setP, setQ, false, 5000 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setP, setQ]) }),
-
+
   BenchmarkInfo(
     name: "SetSymmetricDifferenceInt0",
     runFunction: { n in run_SetSymmetricDifferenceInt(setAB, setCD, countABCD, 10 * n) },
@@ -228,15 +243,25 @@ public let SetTests = [
     setUpFunction: { blackHole([setP, setQ]) }),
 
   BenchmarkInfo(
-    name: "Set.Empty.SubtractingInt0",
-    runFunction: { n in run_SetSubtractingInt(setE, setAB, 0, 10 * n) },
+    name: "Set.subtracting.Empty.Int",
+    runFunction: { n in run_SetSubtractingInt(setE, setAB, 0, 1000 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setE, setAB]) }),
   BenchmarkInfo(
-    name: "Set.Empty.SubtractingBox0",
-    runFunction: { n in run_SetSubtractingBox(setOE, setOAB, 0, 10 * n) },
+    name: "Set.subtracting.Int.Empty",
+    runFunction: { n in run_SetSubtractingInt(setAB, setE, countAB, 1000 * n) },
+    tags: [.validation, .api, .Set],
+    setUpFunction: { blackHole([setAB, setE]) }),
+  BenchmarkInfo(
+    name: "Set.subtracting.Empty.Box",
+    runFunction: { n in run_SetSubtractingBox(setOE, setOAB, 0, 1000 * n) },
     tags: [.validation, .api, .Set],
     setUpFunction: { blackHole([setOE, setOAB]) }),
+  BenchmarkInfo(
+    name: "Set.subtracting.Box.Empty",
+    runFunction: { n in run_SetSubtractingBox(setOAB, setOE, countAB, 1000 * n) },
+    tags: [.validation, .api, .Set],
+    setUpFunction: { blackHole([setOAB, setOE]) }),
   BenchmarkInfo(
     name: "SetSubtractingInt0",
     runFunction: { n in run_SetSubtractingInt(setAB, setCD, countAB, 10 * n) },
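Appended for review context, not part of the patch: a minimal standalone sketch of how the new 20 μs lower bound in BenchmarkDoctor._runtime is expected to classify benchmarks. The check_runtime helper and the print-based logging are hypothetical stand-ins for the BenchmarkDoctor.log_runtime calls in the diff above.

# -*- coding: utf-8 -*-
# Minimal sketch, assuming only the thresholds from the diff above.
# `check_runtime` is a hypothetical helper, not part of Benchmark_Driver.

LOWER_THRESHOLD = 20  # μs; below this, setup overhead cannot be detected
                      # reliably (a 1 μs change in a 20 μs runtime is 5%)

def check_runtime(name, runtime):
    """Classify a benchmark runtime the way the new lower-bound check would."""
    if runtime >= LOWER_THRESHOLD:
        return  # measurable workload, nothing to report
    severity = 'error' if runtime == 0 else 'warning'
    print("[%s] '%s' execution took %d μs." % (severity, name, runtime))
    if runtime == 0:
        print("[info] Ensure the workload of '%s' has a properly measurable"
              " size (runtime > %d μs) and is not eliminated by the compiler"
              " (use `blackHole` function if necessary)."
              % (name, LOWER_THRESHOLD))
    else:
        print("[info] Increase the workload of '%s' to be more than %d μs."
              % (name, LOWER_THRESHOLD))

# Mirrors the new test cases: Sylph (0 μs) is an error, Unicorn (3 μs) a
# warning, and Cheetah (200 μs) passes silently.
for benchmark, runtime in [('Sylph', 0), ('Unicorn', 3), ('Cheetah', 200)]:
    check_runtime(benchmark, runtime)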