Skip to content
Browse files

Removed lightspeed test type in favour of performance.

  • Loading branch information...
1 parent 43782af commit c78264f80ae0d1624e2b6a5c986f51ab124c9c6b @marek-kuzora marek-kuzora committed Jul 15, 2012
View
188 source/lightspeed/test_case.coffee
@@ -1,188 +0,0 @@
-#
-# @require:
-# reporter: fest/lightspeed/reporter
-#
-
-
-
-# Number of times a single test should be executed to get
-# a stable time result.
-EXECUTE_RETRY = 5
-
-# Minimum number of runs for a single test case: 1 time.
-MIN_RUN_TIMES = 1
-
-# Maximum number of runs for a single test case: 10 000 000 times.
-MAX_RUN_TIMES = 10000000
-
-
-
-return class LightSpeedTestCase
-
- constructor: (@_runner, @test) ->
-
- # Number of times the test should be run in a loop.
- @args = @_measure_run_args()
-
- # Collection of the test run times.
- @times = []
-
- # Milliseconds to wait before running test invocation.
- @timeout = @_get_time_limit(@args)
-
- # Number of times the test should be executed asynchronously.
- @_counter = EXECUTE_RETRY
-
-
- execute: =>
-
- # Run the test & store its run time.
- @times.push(@run())
-
- # If the test was executed EXECUTE_RETRY times.
- if --@_counter is 0
-
- # Report finished test case.
- reporter().test_finished(@)
-
- # Schedule asynchronous processing of the next test.
- setTimeout(@_runner.run_next_test, @timeout)
-
- else
-
- # Schedule another asynchronous execution.
- setTimeout(@execute, @timeout)
-
-
- _measure_run_args: ->
-
- # Starting at 1 test invocation & none time.
- arg = MIN_RUN_TIMES
- time = 0
-
- # Run test for the first time to load its environments.
- @run(MIN_RUN_TIMES)
-
- # Iterate fast over number of invocations when the time is none.
- while time is 0 and arg < MAX_RUN_TIMES
- arg *= 10
- time = @run(arg, true) # changed recently!
-
- # Increase number of invocations until the run time is big enough.
- while time < @_get_time_limit(arg) and arg < MAX_RUN_TIMES
- arg *= 2
- time = @run(arg, true) # changed recently!
-
- # Normalize the number of runs to match the appropriate run time.
- return ~~(10 * arg * @_get_time_limit(arg) / @run(arg))
-
-
- #
- # Returns the amount of the time the test should be measured before
- # calculating its final run times. The solid numbers are given in
- # the table below. Limit times for values in between are calculated
- # using linear interpolation.
- #
- # 1x | 10 ms
- # 25x | 25 ms
- # 250x | 25 ms
- # 1 000x | 10 ms
- # 5 000x | 5 ms
- #
- # @param arg {Number}
- # @return time {Number}
- #
- _get_time_limit: (arg) ->
-
- return 10 if arg is 1
- return 0.625 * (arg - 1) + 10 if arg < 25
- return 25 if arg < 250
- return -0.02 * (arg - 250) + 25 if arg < 1000
- return -0.0008 * (arg - 1000) + 10 if arg < 5000
- return 5
-
-
- run: (args = @args, measure = false) ->
-
- # Push new test hierarchy scope.
- F.push_scope()
-
- # Preload environments.
- F.run(env) for env in @_get_environments()
-
- # Create test scope.
- scope = {}
-
- # Run before method.
- @_run_before(scope)
-
- # Retrieve running args to compare.
- ttime = @_run_test(args, scope)
- trest = if not measure then @_run_constants(args, scope) else 0
-
- # Run after method.
- @_run_after(scope)
-
- # Cache names of the modules loaded after the first run.
- @_envs ?= F.get_loaded_modules()
-
- # Remove the test hierarchy scope.
- F.pop_scope()
-
- # Return final run time or 0 if it's negative.
- return if ttime > trest then ttime - trest else 0
-
-
- _run_test: (args, scope) ->
- start = new Date()
- @test.run.call(scope, args)
- return new Date() - start
-
-
- _run_constants: (args, scope) ->
- start = new Date()
- @test.constant.call(scope, args) if @test.constant
- return new Date() - start
-
-
- _run_before: (scope, node = @test) ->
- @_run_before(scope, node.parent) if node.parent
- node.before.call(scope)
-
-
- _run_after: (scope) ->
- node = @test
-
- node.after.call(scope)
- node.after.call(scope) while node = node.parent
-
-
- _get_environments: ->
- node = @test
-
- envs = @test.envs
- envs = envs.concat(node.envs) while node = node.parent
-
- return envs
-
-
- get_ops_per_ms_all: ->
- return (@args/time for time in @times)
-
-
- get_ops_per_ms: ->
- return @args / @get_average_run_time()
-
-
- get_average_run_time: ->
- time = 0
- time += t for t in @times
- return time / EXECUTE_RETRY
-
-
- is_failure: ->
- return false
-
-
- get_failure_text: ->
- return null
View
72 source/lightspeed/type.coffee
@@ -1,72 +0,0 @@
-#
-# @require:
-# types: fest/types
-#
-# Runner: fest/lightspeed/runner
-# TestCase: fest/lightspeed/test_case
-#
-
-
-
-return types().create 'lightspeed',
-
- # Runner running the lightspeed tests.
- runner: new Runner(TestCase)
-
-
- #
- # Additional node initialization code.
- #
- # @param name {String} node name.
- # @param node {Object} node definition.
- #
- node: (name, node) ->
-
- # Before method for the node.
- @before = node.before || (->)
-
- # After method for the test.
- @after = node.after || (->)
-
- # Collection of preloaded modules.
- @envs = node.envs || []
-
- # Constant method for the node. It's purpose is to wrap all
- # time-consuming run operations that cannot be moved from
-# the while loop. TestRunner will subtract run-time of this
- # method from the main run method run-time.
- @constant = node.constant
-
-
- #
- # Additional test initialization code.
- #
- # @param name {String} test name.
- # @param test {Object} test definition.
- #
- test: (name, test) ->
-
- # Assert the test isn't asynchronous.
- if test.async
- throw new Error "Performance tests cannot be asynchronous: #{name}"
-
-
- #
- # Additional group initialization code.
- #
- # @param name {String} group name.
- # @param group {Object} group definition.
- #
- group: (name, group) ->
-
- # Assert the group isn't a scenario.
- if group.scenario
- throw new Error "Performance groups cannot be scenarios: #{name}"
-
-
- #
- # Reserved group properties. Properties defined below will be used to
- # define group parameters, whereas all other group properties will be
- # used to create tests.
- #
- reserved: ['after', 'before', 'constant']
View
0 source/lightspeed/reporter.coffee → source/performance/reporter.coffee
File renamed without changes.
View
2 source/lightspeed/runner.coffee → source/performance/runner.coffee
@@ -1,7 +1,7 @@
#
# @require:
# manager: fest/app
-# reporter: fest/lightspeed/reporter
+# reporter: fest/performance/reporter
#
View
198 source/performance/test_case.coffee
@@ -1,78 +1,188 @@
#
# @require:
-# TestCase: fest/lightspeed/test_case
-#
+# reporter: fest/performance/reporter
+#
-return class PerformanceTestCase extends TestCase
+# Number of times a single test should be executed to get
+# a stable time result.
+EXECUTE_RETRY = 5
- constructor: ->
+# Minimum number of runs for a single test case: 1 time.
+MIN_RUN_TIMES = 1
- # Gathering before_each & after_each arrays of functions.
- @_after_each = @_grather_after_each()
- @_before_each = @_grather_before_each()
+# Maximum number of runs for a single test case: 10 000 000 times.
+MAX_RUN_TIMES = 10000000
- # Calling the super class constructor.
- super
- _grather_before_each: ->
- array = []
+return class LightSpeedTestCase
- while node ?= @test
- array.push(node.before_each) if node.before_each
- node = node.parent
+ constructor: (@_runner, @test) ->
- return array.reverse()
+ # Number of times the test should be run in a loop.
+ @args = @_measure_run_args()
+ # Collection of the test run times.
+ @times = []
- _grather_after_each: ->
- array = []
+ # Milliseconds to wait before running test invocation.
+ @timeout = @_get_time_limit(@args)
- while node ?= @test
- array.push(node.after_each) if node.after_each
- node = node.parent
+ # Number of times the test should be executed asynchronously.
+ @_counter = EXECUTE_RETRY
- return array
+ execute: =>
- _run_test: (args, scope) ->
+ # Run the test & store its run time.
+ @times.push(@run())
- # Start date for full test run time.
- start = new Date()
+ # If the test was executed EXECUTE_RETRY times.
+ if --@_counter is 0
+
+ # Report finished test case.
+ reporter().test_finished(@)
+
+ # Schedule asynchronous processing of the next test.
+ setTimeout(@_runner.run_next_test, @timeout)
+
+ else
+
+ # Schedule another asynchronous execution.
+ setTimeout(@execute, @timeout)
+
+
+ _measure_run_args: ->
+
+ # Starting at 1 test invocation & none time.
+ arg = MIN_RUN_TIMES
+ time = 0
+
+ # Run test for the first time to load its environments.
+ @run(MIN_RUN_TIMES)
+
+ # Iterate fast over number of invocations when the time is none.
+ while time is 0 and arg < MAX_RUN_TIMES
+ arg *= 10
+ time = @run(arg, true) # changed recently!
+
+ # Increase number of invocations until the run time is big enough.
+ while time < @_get_time_limit(arg) and arg < MAX_RUN_TIMES
+ arg *= 2
+ time = @run(arg, true) # changed recently!
+
+ # Normalize the number of runs to match the appropriate run time.
+ return ~~(10 * arg * @_get_time_limit(arg) / @run(arg))
+
+
+ #
+ # Returns the amount of the time the test should be measured before
+ # calculating its final run times. The solid numbers are given in
+ # the table below. Limit times for values in between are calculated
+ # using linear interpolation.
+ #
+ # 1x | 10 ms
+ # 25x | 25 ms
+ # 250x | 25 ms
+ # 1 000x | 10 ms
+ # 5 000x | 5 ms
+ #
+ # @param arg {Number}
+ # @return time {Number}
+ #
+ _get_time_limit: (arg) ->
+
+ return 10 if arg is 1
+ return 0.625 * (arg - 1) + 10 if arg < 25
+ return 25 if arg < 250
+ return -0.02 * (arg - 250) + 25 if arg < 1000
+ return -0.0008 * (arg - 1000) + 10 if arg < 5000
+ return 5
+
+
+ run: (args = @args, measure = false) ->
+
+ # Push new test hierarchy scope.
+ F.push_scope()
+
+ # Preload environments.
+ F.run(env) for env in @_get_environments()
+
+ # Create test scope.
+ scope = {}
+
+ # Run before method.
+ @_run_before(scope)
- while args--
- #b.call(scope) for b in @_before_each
- @test.run.call(scope)
- #a.call(scope) for a in @_after_each
+ # Retrieve running args to compare.
+ ttime = @_run_test(args, scope)
+ trest = if not measure then @_run_constants(args, scope) else 0
- # Return full test run time
+ # Run after method.
+ @_run_after(scope)
+
+ # Cache names of the modules loaded after the first run.
+ @_envs ?= F.get_loaded_modules()
+
+ # Remove the test hierarchy scope.
+ F.pop_scope()
+
+ # Return final run time or 0 if it's negative.
+ return if ttime > trest then ttime - trest else 0
+
+
+ _run_test: (args, scope) ->
+ start = new Date()
+ @test.run.call(scope, args)
return new Date() - start
_run_constants: (args, scope) ->
-
- # Start date for full test run time.
start = new Date()
+ @test.constant.call(scope, args) if @test.constant
+ return new Date() - start
- # Full test run loop.
- #while args--
- # b.call(scope) for b in @_before_each
- # a.call(scope) for a in @_after_each
- # Return full test run time
- return new Date() - start
+ _run_before: (scope, node = @test) ->
+ @_run_before(scope, node.parent) if node.parent
+ node.before.call(scope)
+
+
+ _run_after: (scope) ->
+ node = @test
+
+ node.after.call(scope)
+ node.after.call(scope) while node = node.parent
+
+
+ _get_environments: ->
+ node = @test
+
+ envs = @test.envs
+ envs = envs.concat(node.envs) while node = node.parent
+
+ return envs
+
+
+ get_ops_per_ms_all: ->
+ return (@args/time for time in @times)
+
+
+ get_ops_per_ms: ->
+ return @args / @get_average_run_time()
+
+
+ get_average_run_time: ->
+ time = 0
+ time += t for t in @times
+ return time / EXECUTE_RETRY
is_failure: ->
- return @get_ops_per_ms() > 10000
+ return false
- # TODO moglbym wydzielic informacje dla > 10k, 20k i 30k ops/ms!
get_failure_text: ->
- return if @is_failure()
- then "Test is too fast for standard performance testing." +
- "The results may be inaccurate. Try rewriting the test " +
- "as a lightspeed test."
- else ""
+ return null
View
4 source/performance/type.coffee
@@ -2,8 +2,8 @@
# @require:
# types: fest/types
#
-# Runner: fest/lightspeed/runner
-# TestCase: fest/lightspeed/test_case
+# Runner: fest/performance/runner
+# TestCase: fest/performance/test_case
#

0 comments on commit c78264f

Please sign in to comment.
Something went wrong with that request. Please try again.