Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
27 changed files
with
601 additions
and
379 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -4,7 +4,6 @@ elixir: | |
- 1.5.3 | ||
- 1.6.4 | ||
otp_release: | ||
- 18.3 | ||
- 19.3 | ||
- 20.3 | ||
matrix: | ||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,63 @@ | ||
defmodule Benchee.Benchmark.Hooks do
  @moduledoc false
  # Internal helpers for running the before/after hooks around benchmarks.
  # Non benchee code should not rely on this module.

  alias Benchee.Benchmark.{Scenario, ScenarioContext}

  # Feeds the scenario input through the global before_scenario hook first and
  # the scenario-local one second, returning the (possibly transformed) input.
  def run_before_scenario(
        %Scenario{before_scenario: local_hook, input: input},
        %ScenarioContext{config: %{before_scenario: global_hook}}
      ) do
    input
    |> apply_or_passthrough(global_hook)
    |> apply_or_passthrough(local_hook)
  end

  # Runs the global before_each hook, then the local one, on the current
  # scenario input and returns the value the measured function will receive.
  def run_before_each(
        %{before_each: local_hook},
        %{config: %{before_each: global_hook}, scenario_input: input}
      ) do
    input
    |> apply_or_passthrough(global_hook)
    |> apply_or_passthrough(local_hook)
  end

  # Invokes the local after_each hook followed by the global one with the
  # benchmarked function's return value. Hook results are discarded.
  def run_after_each(
        return_value,
        %{after_each: local_hook},
        %{config: %{after_each: global_hook}}
      ) do
    if local_hook, do: local_hook.(return_value)
    if global_hook, do: global_hook.(return_value)
  end

  # Invokes the local after_scenario hook followed by the global one with the
  # scenario input. Hook results are discarded.
  def run_after_scenario(
        %{after_scenario: local_hook},
        %{config: %{after_scenario: global_hook}, scenario_input: input}
      ) do
    if local_hook, do: local_hook.(input)
    if global_hook, do: global_hook.(input)
  end

  # A nil hook leaves the value untouched; otherwise the hook transforms it.
  defp apply_or_passthrough(value, nil), do: value
  defp apply_or_passthrough(value, hook), do: hook.(value)
end
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,18 @@ | ||
defmodule Benchee.Benchmark.Measure.NativeTime do
  @moduledoc false

  # Times a function call using only the time unit native to the platform, so
  # no unit conversion cost is incurred. Used for determining how many times a
  # function should be repeated in `Benchee.Benchmark.Runner.determine_n_times/3`
  # (private method though).

  @behaviour Benchee.Benchmark.Measure

  # Returns `{elapsed_native_time, function_result}`.
  def measure(function) do
    started_at = :erlang.monotonic_time()
    return_value = function.()
    elapsed = :erlang.monotonic_time() - started_at

    {elapsed, return_value}
  end
end
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,11 +1,23 @@ | ||
defmodule Benchee.Benchmark.Measure.Time do
  @moduledoc false

  # Measure the time elapsed while executing a given function.
  #
  # In contrast to `:timer.tc/1` it always returns the result in nano seconds instead of micro
  # seconds. This helps us avoid losing precision as both Linux and MacOSX seem to be able to
  # measure in nano seconds. `:timer.tc/n`
  # [forfeits this precision](
  # https://github.com/erlang/otp/blob/master/lib/stdlib/src/timer.erl#L164-L169).

  @behaviour Benchee.Benchmark.Measure

  # Returns `{duration_in_nanoseconds, function_result}`. Uses the VM's
  # monotonic clock so the measurement is immune to wall-clock adjustments.
  def measure(function) do
    start = :erlang.monotonic_time()
    result = function.()
    finish = :erlang.monotonic_time()

    duration_nano_seconds = :erlang.convert_time_unit(finish - start, :native, :nanosecond)

    {duration_nano_seconds, result}
  end
end
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,144 @@ | ||
defmodule Benchee.Benchmark.RepeatedMeasurement do
  @moduledoc false

  # This module is an internal implementation detail, and should absolutely not be relied upon
  # from external clients.
  #
  # It is used when we can't conduct measurements precise enough with our measurement precision.
  # I.e. we can measure in microseconds but we measure a function call to take 1 microsecond then
  # most measurements will either be 1 or 2 microseconds which won't give us great results.
  # Instead we repeat the function call n times until we measure at least ~10 (time unit) so
  # that the difference between measurements can at least be ~10%.
  #
  # Today this is mostly only relevant on Windows as we have nanosecond precision on Linux and
  # Mac OS and we've failed to produce a measurable function call that takes less than 10 nano
  # seconds.
  #
  # That's also why this code lives in a separate module and not `Runner` - as it's rarely used
  # and clutters that code + we need a way to test it even if we can't trigger it's conditions.

  alias Benchee.Benchmark.{Hooks, Runner, Scenario, ScenarioContext, Measure}
  alias Benchee.Utility.RepeatN

  # Keep repeating until a single measurement is at least this many time units.
  @minimum_execution_time 10
  # Factor by which the iteration count grows on each unsuccessful attempt.
  @times_multiplier 10

  # If a function executes way too fast measurements are too unreliable and
  # with too high variance. Therefore determine an n how often it should be
  # executed in the measurement cycle.
  #
  # Returns `{num_iterations, per-call run time}`. `fast_warning` controls
  # whether the printer warns the first time we detect a too-fast function.
  def determine_n_times(
        scenario,
        scenario_context = %ScenarioContext{
          num_iterations: num_iterations,
          printer: printer
        },
        fast_warning,
        measurer \\ Measure.NativeTime
      ) do
    run_time = measure_iteration(scenario, scenario_context, measurer)

    if run_time >= @minimum_execution_time do
      {num_iterations, adjust_for_iterations(run_time, num_iterations)}
    else
      # Only warn once; recursive calls pass `false` below.
      if fast_warning, do: printer.fast_warning()

      new_context = %ScenarioContext{
        scenario_context
        | num_iterations: num_iterations * @times_multiplier
      }

      determine_n_times(scenario, new_context, false, measurer)
    end
  end

  # Converts a total measurement over n iterations into a per-call value.
  defp adjust_for_iterations(measurement, 1), do: measurement
  defp adjust_for_iterations(measurement, num_iterations), do: measurement / num_iterations

  # Measures one (possibly repeated) iteration and reports the per-call time.
  def measure(
        scenario,
        scenario_context = %ScenarioContext{
          num_iterations: num_iterations
        },
        measurer
      ) do
    measurement = measure_iteration(scenario, scenario_context, measurer)

    adjust_for_iterations(measurement, num_iterations)
  end

  # Single iteration: defer to the runner's normal measurement path.
  defp measure_iteration(
         scenario,
         scenario_context = %ScenarioContext{
           num_iterations: 1
         },
         measurer
       ) do
    Runner.measure(scenario, scenario_context, measurer)
  end

  defp measure_iteration(
         scenario,
         scenario_context = %ScenarioContext{
           num_iterations: iterations
         },
         measurer
       )
       when iterations > 1 do
    # When we have more than one iteration, then the repetition and calling
    # of hooks is already included in the function, for reference/reasoning see
    # `build_benchmarking_function/2`
    function = build_benchmarking_function(scenario, scenario_context)

    {measurement, _return_value} = measurer.measure(function)

    measurement
  end

  # Builds the appropriate function to benchmark. Takes into account the
  # combinations of the following cases:
  #
  # * an input is specified - creates a 0-argument function calling the original
  #   function with that input
  # * number of iterations - when there's more than one iteration we repeat the
  #   benchmarking function during execution and measure the the total run time.
  #   We only run multiple iterations if a function is so fast that we can't
  #   accurately measure it in one go. Hence, we can't split up the function
  #   execution and hooks anymore and sadly we also measure the time of the
  #   hooks.
  defp build_benchmarking_function(
         %Scenario{
           function: function,
           before_each: nil,
           after_each: nil
         },
         %ScenarioContext{
           num_iterations: iterations,
           scenario_input: input,
           config: %{after_each: nil, before_each: nil}
         }
       )
       when iterations > 1 do
    main = Runner.main_function(function, input)
    # with no before/after each we can safely omit them and don't get the hit
    # on run time measurements (See PR discussions for this for more info #127)
    fn -> RepeatN.repeat_n(main, iterations) end
  end

  defp build_benchmarking_function(
         scenario = %Scenario{function: function},
         scenario_context = %ScenarioContext{num_iterations: iterations}
       )
       when iterations > 1 do
    fn ->
      RepeatN.repeat_n(
        fn ->
          new_input = Hooks.run_before_each(scenario, scenario_context)
          main = Runner.main_function(function, new_input)
          return_value = main.()
          Hooks.run_after_each(return_value, scenario, scenario_context)
        end,
        iterations
      )
    end
  end
end
Oops, something went wrong.