-
Notifications
You must be signed in to change notification settings - Fork 46
/
TestRunner.cs
185 lines (162 loc) · 6.78 KB
/
TestRunner.cs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
// Copyright (c) Petabridge <https://petabridge.com/>. All rights reserved.
// Licensed under the Apache 2.0 license. See LICENSE file in the project root for full license information.
using NBench.Reporting;
using NBench.Reporting.Targets;
using NBench.Sdk.Compiler;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.Contracts;
using System.Threading;
using IBenchmarkOutput = NBench.Reporting.IBenchmarkOutput;
namespace NBench.Sdk
{
/// <summary>
/// Results collected by the test runner after executing a <see cref="TestPackage"/>.
/// </summary>
public class TestRunnerResult
{
    /// <summary>
    /// <c>true</c> if every executed benchmark passed all of its assertions; if any single
    /// assert fails (or an error occurs during the run), this is <c>false</c>.
    /// </summary>
    public bool AllTestsPassed { get; set; }

    /// <summary>
    /// Number of benchmarks that were actually executed.
    /// </summary>
    public int ExecutedTestsCount { get; set; }

    /// <summary>
    /// Number of benchmarks that were skipped due to include/exclude filtering.
    /// </summary>
    public int IgnoredTestsCount { get; set; }

    /// <summary>
    /// Full per-benchmark results for all executed benchmarks.
    /// Defaults to an empty list so consumers never have to null-check.
    /// </summary>
    public IReadOnlyList<BenchmarkFinalResults> FullResults { get; set; } = Array.Empty<BenchmarkFinalResults>();
}
/// <summary>
/// Executor of tests.
/// </summary>
/// <remarks>May be created in a separate AppDomain, therefore it has to be marshalable.</remarks>
public class TestRunner
{
    /// <summary>
    /// Can't apply some of our optimization tricks if running Mono, due to need for elevated permissions.
    /// </summary>
    public static readonly bool IsMono = Type.GetType("Mono.Runtime") != null;

    private readonly TestPackage _package;
    private readonly List<BenchmarkFinalResults> _results = new List<BenchmarkFinalResults>();
    // Funnels per-benchmark final results into _results; wired up once and never reassigned.
    private readonly IBenchmarkOutput _resultsCollector;

    /// <summary>
    /// Initializes a new instance of the test runner.
    /// </summary>
    /// <param name="package">The test package to be executed.</param>
    /// <exception cref="ArgumentNullException">Thrown if <paramref name="package"/> is <c>null</c>.</exception>
    public TestRunner(TestPackage package)
    {
        if (package == null)
            throw new ArgumentNullException(nameof(package));
        _package = package;
        _resultsCollector = new ActionBenchmarkOutput(benchmarkAction: f => { _results.Add(f); });
    }

    /// <summary>
    /// Executes the test package.
    /// </summary>
    /// <param name="package">The test package to execute.</param>
    /// <remarks>Creates a new instance of <see cref="TestRunner"/> and executes the tests.</remarks>
    public static TestRunnerResult Run(TestPackage package)
    {
        Contract.Requires(package != null);
        var runner = new TestRunner(package);
        return runner.Execute();
    }

    /// <summary>
    /// Initializes the process and thread priorities for benchmarking.
    /// </summary>
    /// <param name="concurrent">
    /// When <c>false</c> (single-threaded mode), pins the process to a single processor and
    /// boosts the current thread's priority. When <c>true</c>, affinity and thread priority
    /// are left alone so concurrent benchmark threads compete equally.
    /// </param>
    public static void SetProcessPriority(bool concurrent)
    {
        var process = Process.GetCurrentProcess();

        /*
         * Set processor affinity
         */
        if (!concurrent)
        {
            process.ProcessorAffinity = new IntPtr(2); // strictly the second processor!
        }

        /*
         * Set priority
         */
        if (!IsMono)
            process.PriorityClass = ProcessPriorityClass.High;

        if (!concurrent)
        {
            /*
             * If we're running in concurrent mode, don't give the foreground thread higher priority
             * over the other threads participating in NBench specs. Treat them all equally with the same
             * priority.
             */
            Thread.CurrentThread.Priority = ThreadPriority.Highest;
        }
    }

    /// <summary>
    /// Executes the tests.
    /// </summary>
    /// <returns>
    /// A <see cref="TestRunnerResult"/> whose <see cref="TestRunnerResult.AllTestsPassed"/>
    /// is <c>true</c> only if every executed benchmark passed; any discovery or execution
    /// exception marks the run as failed.
    /// </returns>
    public TestRunnerResult Execute()
    {
        // Perform core / thread optimizations if we're running in single-threaded mode
        // But not if the user has specified that they're going to be running multi-threaded benchmarks
        SetProcessPriority(_package.Concurrent);

        // pass in the runner settings so we can include them in benchmark reports
        // also, toggles tracing on or off
        var runnerSettings = new RunnerSettings()
        {
            ConcurrentModeEnabled = _package.Concurrent,
            TracingEnabled = _package.Tracing
        };

        IBenchmarkOutput output = CreateOutput();
        var discovery = new ReflectionDiscovery(output,
            DefaultBenchmarkAssertionRunner.Instance, // one day we might be able to pass in custom assertion runners, hence why this is here
            runnerSettings);

        var result = new TestRunnerResult()
        {
            AllTestsPassed = true,
            FullResults = _results
        };

        try
        {
            foreach (var assembly in _package.TestAssemblies)
            {
                output.WriteLine($"Executing Benchmarks in {assembly}");
                var benchmarks = discovery.FindBenchmarks(assembly);

                foreach (var benchmark in benchmarks)
                {
                    // verify if the benchmark should be included/excluded from the list of benchmarks to be run
                    if (_package.ShouldRunBenchmark(benchmark.BenchmarkName))
                    {
                        output.StartBenchmark(benchmark.BenchmarkName);
                        benchmark.Run();
                        benchmark.Finish();

                        // if one assert fails, all fail
                        result.AllTestsPassed = result.AllTestsPassed && benchmark.AllAssertsPassed;
                        output.FinishBenchmark(benchmark.BenchmarkName);
                        result.ExecutedTestsCount++;
                    }
                    else
                    {
                        output.SkipBenchmark(benchmark.BenchmarkName);
                        result.IgnoredTestsCount++;
                    }
                }
            }
        }
        catch (Exception ex)
        {
            output.Error(ex, "Error while executing the tests.");
            result.AllTestsPassed = false;
        }

        return result;
    }

    /// <summary>
    /// Creates the benchmark output writer: always collects results in-memory, writes to the
    /// console (TeamCity-formatted when requested), and optionally emits Markdown reports.
    /// </summary>
    /// <returns>A composite output that fans out to all configured sinks.</returns>
    protected virtual IBenchmarkOutput CreateOutput()
    {
        var outputs = new List<IBenchmarkOutput>() { _resultsCollector };

        var consoleOutput = _package.TeamCity ?
            new TeamCityBenchmarkOutput()
            : (IBenchmarkOutput)new ConsoleBenchmarkOutput();
        outputs.Add(consoleOutput);

        if (!string.IsNullOrEmpty(_package.OutputDirectory))
        {
            outputs.Add(new MarkdownBenchmarkOutput(_package.OutputDirectory));
        }

        return new CompositeBenchmarkOutput(outputs.ToArray());
    }
}
}