Automated Relative Error Report Generation for Range of Iterations #1672

Open: wants to merge 7 commits into master
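The hunks below call `utility::measurements` and `utility::report_relative_error`, which are expected to come from the examples' shared common utility headers rather than from the files shown here. As a rough sketch of the interface these changes assume (the class name, method names, and the formula below are assumptions inferred from the call sites, not the actual oneTBB utility code; in particular the relative-error definition, half the min-to-max spread divided by the mean, is only a placeholder):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

#include <oneapi/tbb/tick_count.h>

namespace utility {

// Hypothetical sketch of the per-iteration timing helper used by the hunks below.
class measurements {
public:
    explicit measurements(std::size_t iterations) { times.reserve(iterations); }

    void start() { t0 = oneapi::tbb::tick_count::now(); }

    void stop() { times.push_back((oneapi::tbb::tick_count::now() - t0).seconds()); }

    // Placeholder definition: half of the (max - min) spread relative to the mean time.
    double computeRelError() const {
        if (times.empty())
            return 0.0;
        double sum = 0.0;
        for (double t : times)
            sum += t;
        const double mean = sum / static_cast<double>(times.size());
        const auto mm = std::minmax_element(times.begin(), times.end());
        return mean > 0.0 ? (*mm.second - *mm.first) / (2.0 * mean) : 0.0;
    }

private:
    std::vector<double> times;
    oneapi::tbb::tick_count t0;
};

// Hypothetical reporter, styled after utility::report_elapsed_time.
inline void report_relative_error(double rel_error) {
    std::printf("Relative error (over iterations) : %.2f %%\n", rel_error * 100.0);
}

} // namespace utility
```

Each example then wraps its measured region in a start()/stop() pair once per iteration and prints the relative error next to the usual elapsed-time report.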
71 changes: 50 additions & 21 deletions examples/concurrent_hash_map/count_strings/count_strings.cpp
@@ -38,8 +38,9 @@ class hash<std::basic_string<CharT, Traits, Allocator>> {
}

private:
static constexpr std::size_t hash_multiplier = (std::size_t)(
(sizeof(std::size_t) == sizeof(unsigned)) ? 2654435769U : 11400714819323198485ULL);
static constexpr std::size_t hash_multiplier =
(std::size_t)((sizeof(std::size_t) == sizeof(unsigned)) ? 2654435769U
: 11400714819323198485ULL);

std::hash<CharT> char_hash;
}; // struct hash<std::basic_string>
@@ -74,6 +75,7 @@ static bool count_collisions = false;
//! Problem size
long N = 1000000;
const int size_factor = 2;
int numberOfIterations;

//! A concurrent hash table that maps strings to ints.
typedef oneapi::tbb::concurrent_hash_map<MyString, int> StringTable;
@@ -240,6 +242,7 @@ int main(int argc, char* argv[]) {
StringTable table;
oneapi::tbb::tick_count mainStartTime = oneapi::tbb::tick_count::now();
srand(2);
double rel_error;

//! Working threads count
// The 1st argument is the function to obtain 'auto' value; the 2nd is the default value
@@ -253,6 +256,9 @@
//"-h" option for displaying help is present implicitly
.positional_arg(threads, "n-of-threads", utility::thread_number_range_desc)
.positional_arg(N, "n-of-strings", "number of strings")
.positional_arg(numberOfIterations,
"n-of-iterations",
"number of iterations the example runs internally")
.arg(verbose, "verbose", "verbose mode")
.arg(silent, "silent", "no output except elapsed time")
.arg(count_collisions, "count_collisions", "print the count of collisions"));
@@ -263,33 +269,56 @@
Data = new MyString[N];
CreateData();

if (threads.first) {
for (int p = threads.first; p <= threads.last; p = threads.step(p)) {
if (!silent)
printf("threads = %d ", p);
oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism, p);
CountOccurrences(p);
}
if (numberOfIterations <= 0) {
numberOfIterations = 10;
std::cout << "Setting the number of iterations = 10 default"
<< "\n";
}
else {
std::cout << "Input for the number of iterations = " << numberOfIterations << "\n";
}
else { // Number of threads wasn't set explicitly. Run serial and parallel version
{ // serial run
if (!silent)
printf("serial run ");
oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism, 1);
CountOccurrences(1);

utility::measurements mu(numberOfIterations);

for (int iter = 0; iter < numberOfIterations; ++iter) {
mu.start();

if (threads.first) {
for (int p = threads.first; p <= threads.last; p = threads.step(p)) {
if (!silent)
printf("threads = %d ", p);
oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism,
p);
CountOccurrences(p);
}
}
{ // parallel run (number of threads is selected automatically)
if (!silent)
printf("parallel run ");
oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism,
utility::get_default_num_threads());
CountOccurrences(0);
else { // Number of threads wasn't set explicitly. Run serial and parallel version
{ // serial run
if (!silent)
printf("serial run ");

oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism,
1);
CountOccurrences(1);
}
{ // parallel run (number of threads is selected automatically)
if (!silent)
printf("parallel run ");

oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism,
utility::get_default_num_threads());
CountOccurrences(0);
}
}

mu.stop();
}
rel_error = mu.computeRelError();

delete[] Data;

utility::report_elapsed_time((oneapi::tbb::tick_count::now() - mainStartTime).seconds());
utility::report_relative_error(rel_error);

return 0;
}
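All three examples apply the same instrumentation shape; stripped of the example-specific workload it reduces to the sketch below. `workload()` is a placeholder standing in for `CountOccurrences`, `ParallelPreorderTraversal`, or `quickhull`, and the `measurements` and `report_relative_error` helpers are the assumed shared-utility additions sketched above.

```cpp
#include <chrono>
#include <thread>

#include "common/utility/utility.hpp" // assumed to provide utility::measurements and report_relative_error

// Placeholder standing in for CountOccurrences, ParallelPreorderTraversal, or quickhull.
static void workload() {
    std::this_thread::sleep_for(std::chrono::milliseconds(5));
}

int main() {
    int numberOfIterations = 10; // normally read from the new "n-of-iterations" positional argument

    utility::measurements mu(numberOfIterations);
    for (int iter = 0; iter < numberOfIterations; ++iter) {
        mu.start();
        workload(); // only the code between start() and stop() contributes to the timings
        mu.stop();
    }

    utility::report_relative_error(mu.computeRelError());
    return 0;
}
```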
38 changes: 31 additions & 7 deletions examples/parallel_for_each/parallel_preorder/main.cpp
@@ -39,6 +39,7 @@ void ParallelPreorderTraversal(const std::vector<Cell*>& root_set);
static unsigned nodes = 1000;
static unsigned traversals = 500;
static bool SilentFlag = false;
static int numberOfIterations = 1000;

//! Parse the command line.
static void ParseCommandLine(int argc, char* argv[], utility::thread_number_range& threads) {
@@ -53,37 +54,60 @@ static void ParseCommandLine(int argc, char* argv[], utility::thread_number_range& threads) {
traversals,
"n-of-traversals",
"number of times to evaluate the graph. Reduce it (e.g. to 100) to shorten example run time\n")
.positional_arg(numberOfIterations,
"n-of-iterations",
"number of iterations the example runs internally")
.arg(SilentFlag, "silent", "no output except elapsed time "));
}

int main(int argc, char* argv[]) {
utility::thread_number_range threads(utility::get_default_num_threads);
oneapi::tbb::tick_count main_start = oneapi::tbb::tick_count::now();
ParseCommandLine(argc, argv, threads);
double rel_error;

if (numberOfIterations <= 0) {
numberOfIterations = 1000;
std::cout << "Setting the number of iterations = " << numberOfIterations << " default\n";
}
else {
std::cout << "Input for the number of iterations = " << numberOfIterations << "\n";
}

utility::measurements mu(numberOfIterations);

// Start scheduler with given number of threads.
for (int p = threads.first; p <= threads.last; p = threads.step(p)) {
oneapi::tbb::tick_count t0 = oneapi::tbb::tick_count::now();
oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism, p);
srand(2);
std::size_t root_set_size = 0;
{
Graph g;
g.create_random_dag(nodes);
std::vector<Cell*> root_set;
g.get_root_set(root_set);
root_set_size = root_set.size();
for (unsigned int trial = 0; trial < traversals; ++trial) {

for (int iter = 0; iter < numberOfIterations; ++iter) {
mu.start();

{
Graph g;
g.create_random_dag(nodes);
std::vector<Cell*> root_set;
g.get_root_set(root_set);
root_set_size = root_set.size();

ParallelPreorderTraversal(root_set);
}

mu.stop();
}
rel_error = mu.computeRelError();

oneapi::tbb::tick_count::interval_t interval = oneapi::tbb::tick_count::now() - t0;
if (!SilentFlag) {
std::cout << interval.seconds() << " seconds using " << p << " threads ("
<< root_set_size << " nodes in root_set)\n";
}
}

utility::report_elapsed_time((oneapi::tbb::tick_count::now() - main_start).seconds());
utility::report_relative_error(rel_error);

return 0;
}
6 changes: 5 additions & 1 deletion examples/parallel_reduce/convex_hull/convex_hull.hpp
@@ -51,6 +51,7 @@ namespace util {
bool silent = false;
bool verbose = false;
std::vector<std::string> OUTPUT;
int numberOfIterations = 0;

// utility functionality
void ParseInputArgs(int argc, char* argv[], utility::thread_number_range& threads) {
@@ -61,6 +62,9 @@ void ParseInputArgs(int argc, char* argv[], utility::thread_number_range& threads) {
//"-h" option for displaying help is present implicitly
.positional_arg(threads, "n-of-threads", utility::thread_number_range_desc)
.positional_arg(cfg::numberOfPoints, "n-of-points", "number of points")
.positional_arg(numberOfIterations,
"n-of-iterations",
"number of iterations the example runs internally")
.arg(silent, "silent", "no output except elapsed time")
.arg(verbose, "verbose", "turns verbose ON"));
//disabling verbose if silent is specified
@@ -131,7 +135,7 @@ template <typename Index>
struct edge {
Index start;
Index end;
edge(Index _p1, Index _p2) : start(_p1), end(_p2){};
edge(Index _p1, Index _p2) : start(_p1), end(_p2) {};
};

template <typename T>
86 changes: 59 additions & 27 deletions examples/parallel_reduce/convex_hull/convex_hull_bench.cpp
@@ -588,41 +588,63 @@ int main(int argc, char *argv[]) {

int nthreads;
util::my_time_t tm_init, tm_start, tm_end;
double rel_error;

if (util::numberOfIterations <= 0) {
util::numberOfIterations = 1;
std::cout << "Setting the number of iterations = " << util::numberOfIterations
<< " default\n";
}
else {
std::cout << "Input for the number of iterations = " << util::numberOfIterations << "\n";
}

#if USECONCVEC
std::cout << "Starting TBB unbuffered push_back version of QUICK HULL algorithm"
<< "\n";
<< "\n\n";
#else
std::cout << "Starting STL locked unbuffered push_back version of QUICK HULL algorithm"
<< "\n";
<< "\n\n";
#endif // USECONCVEC

utility::measurements mu(util::numberOfIterations);

for (nthreads = threads.first; nthreads <= threads.last; nthreads = threads.step(nthreads)) {
pointVec_t points;
pointVec_t hull;

oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism,
nthreads);
tm_init = util::gettime();
initialize<FillRNDPointsVector>(points);
tm_start = util::gettime();
std::cout << "Parallel init time on " << nthreads
<< " threads: " << util::time_diff(tm_init, tm_start)
<< " Points in input: " << points.size() << "\n";

tm_start = util::gettime();
quickhull(points, hull, false);
tm_end = util::gettime();
std::cout << "Time on " << nthreads << " threads: " << util::time_diff(tm_start, tm_end)
<< " Points in hull: " << hull.size() << "\n";
for (int iter = 0; iter < util::numberOfIterations; ++iter) {
tm_init = util::gettime();
initialize<FillRNDPointsVector>(points);
tm_start = util::gettime();
std::cout << "Parallel init time on " << nthreads
<< " threads: " << util::time_diff(tm_init, tm_start)
<< " Points in input: " << points.size() << "\n";

tm_start = util::gettime();
mu.start();

quickhull(points, hull, false);

mu.stop();
tm_end = util::gettime();
std::cout << "Time on " << nthreads << " threads: " << util::time_diff(tm_start, tm_end)
<< " Points in hull: " << hull.size() << "\n";
}

rel_error = mu.computeRelError();
}
utility::report_relative_error(rel_error);

#if USECONCVEC
std::cout << "Starting TBB buffered version of QUICK HULL algorithm"
<< "\n";
std::cout << "\n\nStarting TBB buffered version of QUICK HULL algorithm"
<< "\n\n";
#else
std::cout << "Starting STL locked buffered version of QUICK HULL algorithm"
<< "\n";
<< "\n\n";
#endif

for (nthreads = threads.first; nthreads <= threads.last; nthreads = threads.step(nthreads)) {
@@ -632,20 +654,30 @@ int main(int argc, char *argv[]) {
oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism,
nthreads);

tm_init = util::gettime();
initialize<FillRNDPointsVector_buf>(points);
tm_start = util::gettime();
std::cout << "Init time on " << nthreads
<< " threads: " << util::time_diff(tm_init, tm_start)
<< " Points in input: " << points.size() << "\n";
for (int iter = 0; iter < util::numberOfIterations; ++iter) {
tm_init = util::gettime();
initialize<FillRNDPointsVector_buf>(points);
tm_start = util::gettime();
std::cout << "Init time on " << nthreads
<< " threads: " << util::time_diff(tm_init, tm_start)
<< " Points in input: " << points.size() << "\n";

tm_start = util::gettime();
mu.start();

tm_start = util::gettime();
quickhull(points, hull, true);
tm_end = util::gettime();
std::cout << "Time on " << nthreads << " threads: " << util::time_diff(tm_start, tm_end)
<< " Points in hull: " << hull.size() << "\n";
quickhull(points, hull, true);

mu.stop();
tm_end = util::gettime();
std::cout << "Time on " << nthreads << " threads: " << util::time_diff(tm_start, tm_end)
<< " Points in hull: " << hull.size() << "\n";
}

rel_error = mu.computeRelError();
}

utility::report_relative_error(rel_error);

return 0;
}
