forked from npshub/mantid
-
Notifications
You must be signed in to change notification settings - Fork 0
/
ThreadSchedulerMutexesTest.h
169 lines (145 loc) · 5.13 KB
/
ThreadSchedulerMutexesTest.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
// Mantid Repository : https://github.com/mantidproject/mantid
//
// Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
// NScD Oak Ridge National Laboratory, European Spallation Source,
// Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
// SPDX - License - Identifier: GPL - 3.0 +
#pragma once
#include "MantidKernel/System.h"
#include "MantidKernel/Timer.h"
#include <utility>
#include <cxxtest/TestSuite.h>
#include <memory>
#include "MantidKernel/ThreadSchedulerMutexes.h"
using namespace Mantid::Kernel;
/// File-scope counter incremented by TaskWithMutex's destructor.
/// Tests (e.g. test_clear) zero it, trigger destruction via the scheduler,
/// and then assert on it to verify that every queued task was destroyed.
/// Plain int (not atomic) is fine: the CxxTest methods run single-threaded.
int ThreadSchedulerMutexesTest_timesDeleted;
class ThreadSchedulerMutexesTest : public CxxTest::TestSuite {
public:
/** A custom implementation of Task,
* that sets its mutex */
class TaskWithMutex : public Task {
public:
TaskWithMutex(std::shared_ptr<std::mutex> mutex, double cost) {
m_mutex = std::move(mutex);
m_cost = cost;
}
/// Count # of times destructed in the destructor
~TaskWithMutex() override { ThreadSchedulerMutexesTest_timesDeleted++; }
void run() override {
// TaskTestNamespace::my_check_value = 123;
}
};
void test_push() {
ThreadSchedulerMutexes sc;
auto mut1 = std::make_shared<std::mutex>();
auto mut2 = std::make_shared<std::mutex>();
auto task1 = std::make_shared<TaskWithMutex>(mut1, 10.0);
auto task2 = std::make_shared<TaskWithMutex>(mut2, 9.0);
sc.push(task1);
TS_ASSERT_EQUALS(sc.size(), 1);
sc.push(task2);
TS_ASSERT_EQUALS(sc.size(), 2);
}
void test_queue() {
ThreadSchedulerMutexes sc;
auto mut1 = std::make_shared<std::mutex>();
auto mut2 = std::make_shared<std::mutex>();
auto mut3 = std::make_shared<std::mutex>();
auto task1 = std::make_shared<TaskWithMutex>(mut1, 10.0);
auto task2 = std::make_shared<TaskWithMutex>(mut1, 9.0);
auto task3 = std::make_shared<TaskWithMutex>(mut1, 8.0);
auto task4 = std::make_shared<TaskWithMutex>(mut2, 7.0);
auto task5 = std::make_shared<TaskWithMutex>(mut2, 6.0);
auto task6 = std::make_shared<TaskWithMutex>(mut3, 5.0);
auto task7 = std::make_shared<TaskWithMutex>(std::shared_ptr<std::mutex>(), 4.0);
sc.push(task1);
sc.push(task2);
sc.push(task3);
TS_ASSERT_EQUALS(sc.size(), 3);
std::shared_ptr<Task> task;
// Run the first task. mut1 becomes busy
task = sc.pop(0);
TS_ASSERT_EQUALS(task, task1);
TS_ASSERT_EQUALS(sc.size(), 2);
// Add some tasks with mut2
sc.push(task4);
sc.push(task5);
TS_ASSERT_EQUALS(sc.size(), 4);
// Next one will be task4 since mut1 is locked. mut2 is busy now too.
task = sc.pop(0);
TS_ASSERT_EQUALS(task, task4);
TS_ASSERT_EQUALS(sc.size(), 3);
sc.push(task6);
// Next one will be task6 since mut1 and mut2 are locked. mut3 is busy now
// too.
task = sc.pop(0);
TS_ASSERT_EQUALS(task, task6);
TS_ASSERT_EQUALS(sc.size(), 3);
// This task has NO mutex, so it comes next
sc.push(task7);
task = sc.pop(0);
TS_ASSERT_EQUALS(task, task7);
TS_ASSERT_EQUALS(sc.size(), 3);
// Now we release task1, allowing task2 to come next
sc.finished(task1.get(), 0);
task = sc.pop(0);
TS_ASSERT_EQUALS(task, task2);
TS_ASSERT_EQUALS(sc.size(), 2);
sc.finished(task2.get(), 0); // Have to complete task2 before task3 comes
task = sc.pop(0);
TS_ASSERT_EQUALS(task, task3);
TS_ASSERT_EQUALS(sc.size(), 1);
// mut2 is still locked, but since it's the last one, task5 is returned
task = sc.pop(0);
TS_ASSERT_EQUALS(task, task5);
TS_ASSERT_EQUALS(sc.size(), 0);
// (for this task, the thread pool would have to wait till the mutex is
// released)
}
void test_clear() {
ThreadSchedulerMutexes sc;
for (size_t i = 0; i < 10; i++) {
std::shared_ptr<TaskWithMutex> task = std::make_shared<TaskWithMutex>(std::make_shared<std::mutex>(), 10.0);
sc.push(task);
}
TS_ASSERT_EQUALS(sc.size(), 10);
ThreadSchedulerMutexesTest_timesDeleted = 0;
sc.clear();
TS_ASSERT_EQUALS(sc.size(), 0);
// Was the destructor called enough times?
TS_ASSERT_EQUALS(ThreadSchedulerMutexesTest_timesDeleted, 10);
}
void test_performance_same_mutex() {
ThreadSchedulerMutexes sc;
Timer tim0;
auto mut1 = std::make_shared<std::mutex>();
size_t num = 500;
for (size_t i = 0; i < num; i++) {
sc.push(std::make_shared<TaskWithMutex>(mut1, 10.0));
}
// std::cout << tim0.elapsed() << " secs to push.\n";
TS_ASSERT_EQUALS(sc.size(), num);
Timer tim1;
for (size_t i = 0; i < num; i++) {
sc.pop(0);
}
// std::cout << tim1.elapsed() << " secs to pop.\n";
TS_ASSERT_EQUALS(sc.size(), 0);
}
void test_performance_lotsOfMutexes() {
ThreadSchedulerMutexes sc;
Timer tim0;
size_t num = 500;
for (size_t i = 0; i < num; i++) {
sc.push(std::make_shared<TaskWithMutex>(std::make_shared<std::mutex>(), 10.0));
}
// std::cout << tim0.elapsed() << " secs to push.\n";
TS_ASSERT_EQUALS(sc.size(), num);
Timer tim1;
for (size_t i = 0; i < num; i++) {
sc.pop(0);
}
// std::cout << tim1.elapsed() << " secs to pop.\n";
TS_ASSERT_EQUALS(sc.size(), 0);
}
};