forked from npshub/mantid
-
Notifications
You must be signed in to change notification settings - Fork 0
/
MutexTest.h
98 lines (86 loc) · 2.9 KB
/
MutexTest.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
// Mantid Repository : https://github.com/mantidproject/mantid
//
// Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
// NScD Oak Ridge National Laboratory, European Spallation Source,
// Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
// SPDX - License - Identifier: GPL - 3.0 +
#pragma once
#include "MantidKernel/CPUTimer.h"
#include "MantidKernel/FunctionTask.h"
#include "MantidKernel/MultiThreaded.h"
#include "MantidKernel/ThreadPool.h"
#include <Poco/RWLock.h>
#include <cxxtest/TestSuite.h>
using namespace Mantid::Kernel;
// Number of doubles in the shared buffer each test starts from (10 million).
#define DATA_SIZE 10000000
// Buffer shared by all reader/writer tasks; guarded by g_mutex below.
// NOTE(review): a non-inline global definition in a header risks ODR
// violations if this header is ever included from more than one TU — confirm
// this test header is only included once (by the cxxtest-generated runner).
std::vector<double> shared_data;
namespace {
// Readers-writer lock protecting shared_data (anonymous namespace keeps it
// local to this translation unit).
Poco::RWLock g_mutex;
}
// Poco::ScopedReadRWLock getReadLock()
//{
// return Poco::ScopedReadRWLock(g_mutex);
//}
/** Worker task: take the lock in shared (read) mode and walk the whole
 * buffer. Many readers may run concurrently; they only block while a
 * writer holds the lock exclusively. */
void reader() {
  // Shared lock — released automatically (RAII) when `lock` goes out of scope.
  Poco::ScopedReadRWLock lock(g_mutex);
  // Touch every element so the read phase takes a measurable amount of time.
  for (size_t idx = 0; idx < shared_data.size(); ++idx) {
    double val = shared_data[idx];
    UNUSED_ARG(val)
  }
}
/** Worker task: take the lock exclusively, append one element to the shared
 * buffer, then rewrite every element. The rewrite exists purely to keep the
 * write lock held long enough to be observable in the timing tests. */
void unconditional_writer() {
  // Exclusive lock — blocks all readers and any other writer until released.
  Poco::ScopedWriteRWLock lock(g_mutex);
  // Grow the buffer by exactly one element with value 2.345
  // (equivalent to resize(size() + 1, 2.345)).
  shared_data.push_back(2.345);
  // Deliberately slow: overwrite the whole buffer while holding the lock.
  for (size_t idx = 0; idx < shared_data.size(); ++idx)
    shared_data[idx] = 4.567;
}
class MutexTest : public CxxTest::TestSuite {
public:
void setUp() override { shared_data.resize(DATA_SIZE, 1.000); }
void tearDown() override {}
void test_nothing() {}
/** Launch a bunch of reading threads */
void test_simultaneous_read() {
ThreadPool pool;
CPUTimer tim;
size_t numTasks = 50;
for (size_t i = 0; i < numTasks; i++)
pool.schedule(std::make_shared<FunctionTask>(reader));
pool.joinAll();
std::cout << tim << " to execute all " << numTasks << " tasks\n";
}
/** Launch a bunch of writing threads */
void test_simultaneous_write() {
ThreadPool pool;
CPUTimer tim;
size_t numTasks = 10;
for (size_t i = 0; i < numTasks; i++)
pool.schedule(std::make_shared<FunctionTask>(unconditional_writer));
pool.joinAll();
std::cout << tim << " to execute all " << numTasks << " tasks\n";
TSM_ASSERT_EQUALS("The writers were all called", shared_data.size(), DATA_SIZE + numTasks)
}
/** Mix 1 writing thread for 9 reading threads */
void test_write_blocks_readers() {
ThreadPool pool;
CPUTimer tim;
size_t numTasks = 50;
for (size_t i = 0; i < numTasks; i++) {
if (i % 10 == 0)
pool.schedule(std::make_shared<FunctionTask>(unconditional_writer));
else
pool.schedule(std::make_shared<FunctionTask>(reader));
}
pool.joinAll();
std::cout << tim << " to execute all " << numTasks << " tasks\n";
TSM_ASSERT_EQUALS("The writers were all called", shared_data.size(), DATA_SIZE + numTasks / 10)
}
};