-
Notifications
You must be signed in to change notification settings - Fork 36
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
test(Benchmark): pulled in benchmark in examples folder from elm-benchmark
test(convertingDates): adjusted; still no closer to resolving issue #6
- Loading branch information
Showing
7 changed files
with
4,364 additions
and
12 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,212 @@ | ||
module Benchmark
  ( Never
  , Benchmark
  , Suite (..)
  , BenchStats
  , run
  , runWithProgress
  , bench
  , bench1
  , bench2
  , bench3
  , bench4
  , bench5
  , bench6
  , bench7
  , bench8
  , bench9
  ) where

{-| A wrapper around benchmark.js that allows benchmarking
of pure functions to be evaluated.

# Error and Benchmark types
@docs Never, Benchmark, Suite, BenchStats

# Running benchmarks
@docs run, runWithProgress

# Creating benchmarks
@docs bench, bench1, bench2, bench3, bench4, bench5, bench6, bench7, bench8, bench9
-}
|
||
|
||
import Task | ||
import String | ||
import Native.Benchmark | ||
import Native.BenchmarkJS | ||
import Signal | ||
import Time | ||
|
||
|
||
{-| Our error return type. It can never actually be constructed
(every `Never` would need another `Never` inside it), which encodes
the fact that running benchmarks should not fail.
-}
type Never =
  Never Never
|
||
|
||
{-| Opaque type representing a named function to be timed.
Values are created via `bench`/`bench1`..`bench9`, which delegate
to the native side (`Native.Benchmark.makeBenchmark`).
-}
type Benchmark =
  Benchmark
|
||
|
||
{-| The results of running a single benchmark:

  * `name`          -- the name the benchmark was registered under
  * `hz`            -- measured operations per second (benchmark.js `hz`)
  * `marginOfError` -- margin of error of the measurement (benchmark.js `stats.moe`)
  * `moePercent`    -- relative margin of error, as a percentage (benchmark.js `stats.rme`)
-}
type BenchStats =
  BenchStats
    { name : String
    , hz : Float
    , marginOfError : Float
    , moePercent : Float
    -- , numRunsSampled : Int
    }
|
||
|
||
{-| A single benchmark, or a named collection of benchmarks,
that can be run as a unit to generate output.
-}
type Suite =
  SingleBenchmark Benchmark
  | Suite String (List Benchmark)
|
||
|
||
{-| Run a benchmark suite without any progress reporting.
The resulting task yields a pretty-printed string summary together
with the detailed stats for each benchmark; it cannot fail.
-}
run : Suite -> Task.Task Never (String, List BenchStats)
run suite =
  runWithProgress Nothing suite
|
||
|
||
{-| Run a benchmark suite, generating a list of results for each
benchmark. If a mailbox is given, a progress string is sent to it
as the benchmarks run.
-}
runWithProgress
  : Maybe (Signal.Mailbox String)
  -> Suite
  -> Task.Task Never (String, List BenchStats)
runWithProgress maybeMailbox suite =
  let
    -- Progress callback handed to the native runner:
    -- without a mailbox it is effectively a no-op task;
    -- with one it sends the progress string, then sleeps.
    ourTask =
      case maybeMailbox of
        Nothing -> \_ -> Task.sleep 0
        Just mailbox ->
          \s ->
            (Signal.send mailbox.address s
              -- NOTE(review): sleeping 5 seconds after every progress
              -- update looks like leftover debugging — confirm intent.
              `Task.andThen` \_ -> Task.sleep (5*Time.second))
            -- `Task.andThen` \_ -> Task.keepGoing
  in
    Native.Benchmark.runWithProgress ourTask suite
|
||
{-| Create a benchmark with the given name that times
an arbitrary thunk.
-}
bench : String -> (() -> result) -> Benchmark
bench name thunk =
  Native.Benchmark.makeBenchmark name thunk
|
||
|
||
{-| Create a named benchmark timing a one-argument function
applied to the given argument.
-}
bench1 : String -> (a -> result) -> a -> Benchmark
bench1 name toRun x1 =
  Native.Benchmark.makeBenchmark name (\_ -> toRun x1)
|
||
|
||
{-| Create a named benchmark timing a two-argument function. -}
bench2 : String -> (a -> b -> result) -> a -> b -> Benchmark
bench2 name toRun x1 x2 =
  Native.Benchmark.makeBenchmark name (\_ -> toRun x1 x2)
|
||
|
||
{-| Create a named benchmark timing a three-argument function. -}
bench3 : String -> (a -> b -> c -> result) -> a -> b -> c -> Benchmark
bench3 name toRun x1 x2 x3 =
  Native.Benchmark.makeBenchmark name (\_ -> toRun x1 x2 x3)
|
||
|
||
{-| Create a named benchmark timing a four-argument function. -}
bench4 : String -> (a -> b -> c -> d -> result) -> a -> b -> c -> d -> Benchmark
bench4 name toRun x1 x2 x3 x4 =
  Native.Benchmark.makeBenchmark name (\_ -> toRun x1 x2 x3 x4)
|
||
|
||
{-| Create a named benchmark timing a five-argument function. -}
bench5 : String -> (a -> b -> c -> d -> e -> result) -> a -> b -> c -> d -> e -> Benchmark
bench5 name toRun x1 x2 x3 x4 x5 =
  Native.Benchmark.makeBenchmark name (\_ -> toRun x1 x2 x3 x4 x5)
|
||
|
||
{-| Create a named benchmark timing a six-argument function. -}
bench6 :
  String
  -> (a -> b -> c -> d -> e -> f -> result)
  -> a -> b -> c -> d -> e -> f
  -> Benchmark
bench6 name toRun x1 x2 x3 x4 x5 x6 =
  Native.Benchmark.makeBenchmark name (\_ -> toRun x1 x2 x3 x4 x5 x6)
|
||
|
||
{-| Create a named benchmark timing a seven-argument function. -}
bench7 :
  String
  -> (a -> b -> c -> d -> e -> f -> g -> result)
  -> a -> b -> c -> d -> e -> f -> g
  -> Benchmark
bench7 name toRun x1 x2 x3 x4 x5 x6 x7 =
  Native.Benchmark.makeBenchmark name (\_ -> toRun x1 x2 x3 x4 x5 x6 x7)
|
||
|
||
{-| Create a named benchmark timing an eight-argument function. -}
bench8 :
  String
  -> (a -> b -> c -> d -> e -> f -> g -> h -> result)
  -> a -> b -> c -> d -> e -> f -> g -> h
  -> Benchmark
bench8 name toRun x1 x2 x3 x4 x5 x6 x7 x8 =
  Native.Benchmark.makeBenchmark name (\_ -> toRun x1 x2 x3 x4 x5 x6 x7 x8)
|
||
{-| Create a named benchmark timing a nine-argument function. -}
bench9 :
  String
  -> (a -> b -> c -> d -> e -> f -> g -> h -> i -> result)
  -> a -> b -> c -> d -> e -> f -> g -> h -> i
  -> Benchmark
bench9 name toRun x1 x2 x3 x4 x5 x6 x7 x8 x9 =
  Native.Benchmark.makeBenchmark name (\_ -> toRun x1 x2 x3 x4 x5 x6 x7 x8 x9)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,102 @@ | ||
Elm.Native.Benchmark = {};
Elm.Native.Benchmark.make = function(localRuntime) {
	'use strict';

	localRuntime.Native = localRuntime.Native || {};
	localRuntime.Native.Benchmark = localRuntime.Native.Benchmark || {};
	if (localRuntime.Native.Benchmark.values)
	{
		return localRuntime.Native.Benchmark.values;
	}

	var List = Elm.Native.List.make(localRuntime);
	var Task = Elm.Native.Task.make(localRuntime);
	var Signal = Elm.Signal.make(localRuntime);
	var Utils = Elm.Native.Utils.make(localRuntime);

	// Nothing special happens here, but we need a Native function
	// so that we can accept thunks of any type.
	function makeBenchmark(name, thunk)
	{
		return { name: name, thunk: thunk };
	}

	// Generate the task for running a benchmark suite, updating the
	// Elm side with a progress string via progressTaskFn as we go.
	// progressTaskFn: String -> Task (a no-op task when the Elm caller
	// passed Nothing for the mailbox).
	function runWithProgress(progressTaskFn, inSuite)
	{
		return Task.asyncFunction(function(callback) {
			// `Benchmark` here is the global from benchmark.js.
			var bjsSuite = new Benchmark.Suite();
			var benchArray;
			var retData = [];
			var finalString = "";
			var numCompleted = 0;
			var numToRun;

			switch (inSuite.ctor)
			{
				case "Suite":
					benchArray = List.toArray(inSuite._1);
					break;
				case "SingleBenchmark":
					benchArray = [inSuite._0];
					break;
				default:
					// FIX: fail loudly on an unknown constructor instead of
					// falling through with benchArray undefined.
					throw new Error("Benchmark.runWithProgress: unknown Suite ctor " + inSuite.ctor);
			}

			numToRun = benchArray.length;
			Task.perform(progressTaskFn("Running benchmark 1 of " + numToRun));

			// Register each benchmark with benchmark.js.
			// FIX: declare the loop variable ('i' was an implicit global)
			// and drop the unused 'ourThunk' closure, which referenced an
			// undefined 'deferred' and captured the loop variable incorrectly.
			for (var i = 0; i < benchArray.length; i++)
			{
				bjsSuite.add(benchArray[i].name, benchArray[i].thunk);
			}

			// Every time a benchmark finishes, store its results
			// and update our progress string.
			bjsSuite.on('cycle', function(event) {
				numCompleted += 1;
				retData.push(
					{ name: event.target.options.name
					, hz: event.target.hz
					, marginOfError: event.target.stats.moe
					, moePercent: event.target.stats.rme
					}
				);
				finalString += String(event.target) + "\n";
				// FIX: clamp the "up next" index so the final cycle does not
				// report "Running benchmark N+1 of N".
				var nextUp = Math.min(numCompleted + 1, numToRun);
				var intermedString =
					"Running benchmark "
					+ nextUp
					+ " of " + numToRun
					+ "\nLast result: " + String(event.target);
				Task.perform(progressTaskFn(intermedString));
			});

			// When the last benchmark finishes, show all collected results
			// and complete the Elm task with (summary, stats).
			bjsSuite.on('complete', function(event) {
				Task.perform(progressTaskFn("Final results:\n\n" + finalString));
				return callback(Task.succeed(
					Utils.Tuple2(finalString, List.fromArray(retData))));
			});

			// Finally: actually run the suite (asynchronously, so the
			// 'cycle' progress updates can reach the UI between runs).
			Task.perform(
				Task.asyncFunction(function(otherCallback) {
					bjsSuite.run({ 'async': true });
				}));
		});
	}

	return localRuntime.Native.Benchmark.values = {
		makeBenchmark: F2(makeBenchmark),
		runWithProgress: F2(runWithProgress)
	};
};
Oops, something went wrong.