8 changes: 8 additions & 0 deletions benchmark/cmake/modules/AddSwiftBenchmarkSuite.cmake
@@ -363,6 +363,10 @@ function (swift_benchmark_compile_archopts)
     list(APPEND common_options "-g")
   endif()
 
+  if("${optflag}" STREQUAL "Onone")
+    list(APPEND common_options "-DDEBUG")
+  endif()
+
   if (is_darwin)
     list(APPEND common_options
       "-I" "${srcdir}/utils/ObjectiveCTests"
@@ -400,6 +404,10 @@
       "-target" "${target}"
       "-${driver_opt}")
 
+  if(${optflag} STREQUAL "Onone")
+    list(APPEND common_options_driver "-DDEBUG")
+  endif()
+
   if(SWIFT_BENCHMARK_GENERATE_DEBUG_INFO)
     list(APPEND common_options_driver "-g")
   endif()
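These two hunks plumb `-DDEBUG` into the `-Onone` compile and driver invocations so benchmark sources can gate debug-only code on the build configuration. A minimal sketch of what the flag enables on the Swift side — `setUpIteration` and the print are illustrative, not part of the suite:

```swift
// Minimal sketch: swiftc's -D flag defines a conditional-compilation
// symbol, so -DDEBUG makes `#if DEBUG` blocks compile in on -Onone
// builds and compile out of optimized ones.
func setUpIteration() {
  #if DEBUG
  // Only present in the unoptimized (-Onone) configuration.
  print("warning: timing an unoptimized build")
  #endif
}
```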
12 changes: 3 additions & 9 deletions benchmark/multi-source/Monoids/Benchmark.swift
@@ -1,20 +1,14 @@
 import TestsUtils
-import Dispatch
 
 public let benchmarks = [
   BenchmarkInfo(
     name: "Monoids",
     runFunction: run_Monoids,
-    tags: [.algorithm])
+    tags: [.algorithm, .miniapplication, .long])
 ]
 
-func run_Monoids(_ n: Int) {
-  let semaphore = DispatchSemaphore(value: 0)
+func run_Monoids(_ n: Int) async {
   for _ in 0 ... n {
-    Task {
-      await run(output: false)
-      semaphore.signal()
-    }
-    semaphore.wait()
+    await run(output: false)
   }
 }
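The change drops the semaphore trampoline: once the harness accepts async run functions, the benchmark can await `run` directly instead of parking a thread while a detached `Task` completes. A rough sketch of the before/after shape, with `work()` standing in for the benchmark's `run`:

```swift
import Dispatch

func work() async { /* stand-in for the benchmark's `run` */ }

// Before: a synchronous run function bridges to async work by blocking
// a thread on a semaphore until the task signals completion.
func runBlocking() {
  let semaphore = DispatchSemaphore(value: 0)
  Task {
    await work()
    semaphore.signal()
  }
  semaphore.wait() // blocks a thread for the duration of the task
}

// After: an async run function suspends instead of blocking.
func runAsync() async {
  await work()
}
```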
9 changes: 8 additions & 1 deletion benchmark/scripts/build_script_helper.py
@@ -25,8 +25,15 @@ def perform_build(args, swiftbuild_path, config, binary_name, opt_flag):
         "-Xswiftc",
         "-align-module-to-page-size",
         "-Xswiftc",
-        opt_flag,
+        opt_flag
     ]
 
+    if config == "debug":
+        swiftbuild_args += [
+            "-Xswiftc",
+            "-DDEBUG"
+        ]
+
     if args.verbose:
         swiftbuild_args.append("--verbose")
     subprocess.call(swiftbuild_args)
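For reference, the same conditional define can also live in a SwiftPM manifest, scoped to the debug configuration; this helper presumably injects it via `-Xswiftc` because it drives `swift build` directly. A sketch under that assumption — the manifest and target names are illustrative:

```swift
// swift-tools-version:5.9
import PackageDescription

// Sketch only: scoping the DEBUG compilation condition to
// debug-configuration builds from the package manifest.
let package = Package(
  name: "BenchmarkSuite",
  targets: [
    .executableTarget(
      name: "Benchmarks",
      swiftSettings: [
        .define("DEBUG", .when(configuration: .debug))
      ]
    )
  ]
)
```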
48 changes: 30 additions & 18 deletions benchmark/utils/DriverUtils.swift
@@ -23,6 +23,12 @@ import LibProc
 
 import TestsUtils
 
+/// Sorry.
+private func ??<T>(_ x: T?, _ y: @autoclosure () async -> T) async -> T {
+  if let x { return x }
+  return await y()
+}
+
 struct MeasurementMetadata {
   // Note: maxRSS and pages subtract the RSS measured
   // after the benchmark driver setup has finished.
@@ -198,10 +204,16 @@ struct TestConfig {
     action = c.action ?? .run
     allowNondeterministicHashing = c.allowNondeterministicHashing ?? false
     jsonOutput = c.jsonOutput ?? false
+
+    var skipTags: Set<BenchmarkCategory>
+    skipTags = c.skipTags ?? [.unstable, .skip]
+    #if DEBUG
+    skipTags.insert(.long)
+    #endif
     tests = TestConfig.filterTests(registeredBenchmarks,
                                    tests: c.tests ?? [],
                                    tags: c.tags ?? [],
-                                   skipTags: c.skipTags ?? [.unstable, .skip])
+                                   skipTags: skipTags)
 
     if tests.count > 0 {
       testNameLength = tests.map{$0.info.name.count}.sorted().reversed().first!
@@ -481,13 +493,13 @@ final class TestRunner {
   }
 
   /// Measure the `fn` and return the average sample time per iteration (μs).
-  func measure(_ name: String, fn: (Int) -> Void, numIters: Int) -> Double {
+  func measure(_ name: String, fn: (Int) async -> Void, numIters: Int) async -> Double {
 #if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
     name.withCString { p in startTrackingObjects(p) }
 #endif
 
     startMeasurement()
-    fn(numIters)
+    await fn(numIters)
     stopMeasurement()
 
 #if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
@@ -502,7 +514,7 @@
   }
 
   /// Run the benchmark and return the measured results.
-  func run(_ test: BenchmarkInfo) -> BenchResults? {
+  func run(_ test: BenchmarkInfo) async -> BenchResults? {
     // Before we do anything, check that we actually have a function to
     // run. If we don't it is because the benchmark is not supported on
     // the platform and we should skip it.
@@ -528,8 +540,8 @@
     }
 
     // Determine number of iterations for testFn to run for desired time.
-    func iterationsPerSampleTime() -> (numIters: Int, oneIter: Double) {
-      let oneIter = measure(test.name, fn: testFn, numIters: 1)
+    func iterationsPerSampleTime() async -> (numIters: Int, oneIter: Double) {
+      let oneIter = await measure(test.name, fn: testFn, numIters: 1)
       if oneIter > 0 {
         let timePerSample = c.sampleTime * 1_000_000.0 // microseconds (μs)
         return (max(Int(timePerSample / oneIter), 1), oneIter)
@@ -540,28 +552,28 @@
 
     // Determine the scale of measurements. Re-use the calibration result if
     // it is just one measurement.
-    func calibrateMeasurements() -> Int {
-      let (numIters, oneIter) = iterationsPerSampleTime()
+    func calibrateMeasurements() async -> Int {
+      let (numIters, oneIter) = await iterationsPerSampleTime()
       if numIters == 1 { addSample(oneIter) }
       else { resetMeasurements() } // for accurate yielding reports
       return numIters
     }
 
     let numIters = min( // Cap to prevent overflow on 32-bit systems when scaled
       Int.max / 10_000, // by the inner loop multiplier inside the `testFn`.
-      c.numIters ?? calibrateMeasurements())
+      await c.numIters ?? (await calibrateMeasurements()))
 
-    let numSamples = c.numSamples ??
+    let numSamples = await c.numSamples ??
       // Compute the number of samples to measure for `sample-time`,
       // clamped in (`min-samples`, 200) range, if the `num-iters` are fixed.
-      max(c.minSamples ?? 1, min(200, c.numIters == nil ? 1 :
-        calibrateMeasurements()))
+      (max(await c.minSamples ?? 1, min(200, c.numIters == nil ? 1 :
+        await calibrateMeasurements())))
 
     samples.reserveCapacity(numSamples)
     logVerbose(" Collecting \(numSamples) samples.")
     logVerbose(" Measuring with scale \(numIters).")
     for _ in samples.count..<numSamples {
-      addSample(measure(test.name, fn: testFn, numIters: numIters))
+      addSample(await measure(test.name, fn: testFn, numIters: numIters))
     }
 
     test.tearDownFunction?()
@@ -681,16 +693,16 @@
   }
 
   /// Run each benchmark and emit the results in JSON
-  func runBenchmarks() {
+  func runBenchmarks() async {
     var testCount = 0
     if !c.jsonOutput {
       printTextHeading()
     }
     for (index, info) in c.tests {
       if c.jsonOutput {
-        printJSON(index: index, info: info, results: run(info))
+        printJSON(index: index, info: info, results: await run(info))
       } else {
-        printText(index: index, info: info, results: run(info))
+        printText(index: index, info: info, results: await run(info))
       }
       testCount += 1
     }
@@ -712,7 +724,7 @@ extension Hasher {
   }
 }
 
-public func main() {
+public func main() async {
   let config = TestConfig(registeredBenchmarks)
   switch (config.action) {
   case .listTests:
@@ -742,7 +754,7 @@ public func main() {
       the option '--allow-nondeterministic-hashing to the benchmarking executable.
       """)
   }
-  TestRunner(config).runBenchmarks()
+  await TestRunner(config).runBenchmarks()
   if let x = config.afterRunSleep {
     sleep(x)
   }
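The apologetic `??` overload exists because the standard library's `??` takes a synchronous `@autoclosure`, so its right-hand side cannot contain `await`; the async overload lets the calibration fallback keep the `c.numIters ?? calibrateMeasurements()` shape used above. A usage sketch — `expensiveDefault` and `resolve` are illustrative names, not part of DriverUtils:

```swift
// Async fallback via the custom `??` overload defined in this file.
private func ??<T>(_ x: T?, _ y: @autoclosure () async -> T) async -> T {
  if let x { return x }
  return await y()
}

func expensiveDefault() async -> Int { 42 } // illustrative

func resolve(_ cached: Int?) async -> Int {
  // Mirrors `await c.numIters ?? (await calibrateMeasurements())`:
  // the async computation only runs when the optional is nil.
  await cached ?? (await expensiveDefault())
}
```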
9 changes: 6 additions & 3 deletions benchmark/utils/TestsUtils.swift
@@ -69,6 +69,9 @@ public enum BenchmarkCategory : String {
   // significant optimization.
   case cpubench
 
+  // Benchmarks to skip on -Onone runs.
+  case long
+
   // Explicit skip marker
   case skip
 }
@@ -113,10 +116,10 @@ public struct BenchmarkInfo {
   public var name: String
 
   /// Shadow static variable for runFunction.
-  private var _runFunction: (Int) -> ()
+  private var _runFunction: (Int) async -> ()
 
   /// A function that invokes the specific benchmark routine.
-  public var runFunction: ((Int) -> ())? {
+  public var runFunction: ((Int) async -> ())? {
     if !shouldRun {
       return nil
     }
@@ -171,7 +174,7 @@
   /// to be interrupted by a context switch.
   public var legacyFactor: Int?
 
-  public init(name: String, runFunction: @escaping (Int) -> (), tags: [BenchmarkCategory],
+  public init(name: String, runFunction: @escaping (Int) async -> (), tags: [BenchmarkCategory],
               setUpFunction: (() -> ())? = nil,
              tearDownFunction: (() -> ())? = nil,
              unsupportedPlatforms: BenchmarkPlatformSet = [],
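With `runFunction` widened to `(Int) async -> ()`, existing synchronous benchmarks still register unchanged — a sync function converts implicitly to an async function type — while async ones can now be listed directly. A sketch of a registration using the new tag; the benchmark and `doWork` are hypothetical:

```swift
import TestsUtils

func doWork() async {} // hypothetical async workload

func run_MyAsyncBench(_ n: Int) async {
  for _ in 0..<n {
    await doWork()
  }
}

public let benchmarks = [
  BenchmarkInfo(
    name: "MyAsyncBench",
    runFunction: run_MyAsyncBench,
    // .long benchmarks are skipped by default on -Onone runs,
    // where the build scripts define DEBUG.
    tags: [.algorithm, .long])
]
```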
2 changes: 1 addition & 1 deletion benchmark/utils/main.swift
@@ -421,4 +421,4 @@ register(Walsh.benchmarks)
 register(WordCount.benchmarks)
 register(XorLoop.benchmarks)
 
-main()
+await main()
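Top-level `await` is what lets the whole chain stay async: `main.swift` executes its statements as the program's entry point, and awaiting there makes that entry point the root async context, so nothing between here and the benchmark bodies has to block a thread. A sketch of the pattern in isolation:

```swift
// main.swift-style top-level code: `await` is permitted here and makes
// the entry point async (requires the Swift concurrency runtime).
func greet() async -> String { "hello" }

let message = await greet()
print(message)
```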