Skip to content

Commit

Permalink
Adding new performance counter exposing overall scheduler time
Browse files Browse the repository at this point in the history
- adds missing perf counter: /threads/time/overall
  • Loading branch information
hkaiser committed Feb 28, 2016
1 parent 60fff02 commit 3c9d675
Show file tree
Hide file tree
Showing 5 changed files with 86 additions and 0 deletions.
26 changes: 26 additions & 0 deletions docs/manual/existing_performance_counters.qbk
Expand Up @@ -706,6 +706,32 @@ system and application performance.
`HPX_WITH_THREAD_CUMULATIVE_COUNTS` (default: ON) and
`HPX_WITH_THREAD_IDLE_RATES` are set to `ON` (default: OFF).]
]
[ [`/threads/time/overall`]
[`locality#*/total` or[br]
`locality#*/worker-thread#*`

where:[br]
`locality#*` is defining the locality for which the overall time spent
running the scheduler should be queried
for. The locality id (given by `*`) is a (zero based) number
identifying the locality.

`worker-thread#*` is defining the worker thread for which the
overall time spent running the scheduler should
be queried for. The worker thread number (given by the `*`) is a
(zero based) number identifying the worker thread. The number of
available worker threads is usually specified on the command line
for the application using the option [hpx_cmdline `--hpx:threads`].
]
[None]
[Returns the overall time spent running the scheduler on the
given locality since application start. If the
instance name is `total` the counter returns the overall time spent
running the scheduler for all worker threads
(cores) on that locality. If the instance name is `worker-thread#*`
the counter will return the overall time spent running the scheduler
for the given worker thread.]
]
[ [`/threads/time/cumulative`]
[`locality#*/total` or[br]
`locality#*/worker-thread#*`
Expand Down
3 changes: 3 additions & 0 deletions hpx/runtime/threads/detail/thread_pool.hpp
Expand Up @@ -89,6 +89,8 @@ namespace hpx { namespace threads { namespace detail
#endif
#endif

boost::int64_t get_cumulative_duration(std::size_t num, bool reset);

#if defined(HPX_HAVE_THREAD_IDLE_RATES)
///////////////////////////////////////////////////////////////////////
boost::int64_t avg_idle_rate(bool reset);
Expand Down Expand Up @@ -210,6 +212,7 @@ namespace hpx { namespace threads { namespace detail

// tfunc_impl timers
std::vector<boost::uint64_t> exec_times_, tfunc_times_;
std::vector<boost::uint64_t> reset_tfunc_times_;

// Stores the mask identifying all processing units used by this
// thread manager.
Expand Down
3 changes: 3 additions & 0 deletions hpx/runtime/threads/threadmanager_impl.hpp
Expand Up @@ -235,6 +235,9 @@ namespace hpx { namespace threads
#endif
#endif

boost::int64_t get_cumulative_duration(
std::size_t num = std::size_t(-1), bool reset = false);

protected:
///
template <typename C>
Expand Down
39 changes: 39 additions & 0 deletions src/runtime/threads/detail/thread_pool.cpp
Expand Up @@ -287,6 +287,8 @@ namespace hpx { namespace threads { namespace detail
tfunc_times_.resize(num_threads);
exec_times_.resize(num_threads);

reset_tfunc_times_.resize(num_threads);

#if defined(HPX_HAVE_THREAD_CUMULATIVE_COUNTS)
// timestamps/values of last reset operation for various performance
// counters
Expand Down Expand Up @@ -1114,6 +1116,43 @@ namespace hpx { namespace threads { namespace detail
#endif
#endif

template <typename Scheduler>
boost::int64_t thread_pool<Scheduler>::
    get_cumulative_duration(std::size_t num, bool reset)
{
    // Overall time the scheduler loop (tfunc) has been running, in
    // nanoseconds, since application start or since the last reset.
    //
    // \param num    worker thread to query, or std::size_t(-1) to
    //               aggregate over all worker threads of this pool
    // \param reset  if true, the reset baseline is advanced so that
    //               subsequent queries report time relative to now
    boost::uint64_t tfunc_total = 0ul;
    boost::uint64_t reset_tfunc_total = 0ul;

    if (num != std::size_t(-1))
    {
        // query a single worker thread
        tfunc_total = tfunc_times_[num];
        reset_tfunc_total = reset_tfunc_times_[num];

        if (reset)
            reset_tfunc_times_[num] = tfunc_total;
    }
    else
    {
        // aggregate over all worker threads
        tfunc_total = std::accumulate(tfunc_times_.begin(),
            tfunc_times_.end(), boost::uint64_t(0));
        reset_tfunc_total = std::accumulate(
            reset_tfunc_times_.begin(), reset_tfunc_times_.end(),
            boost::uint64_t(0));

        if (reset)
        {
            std::copy(tfunc_times_.begin(), tfunc_times_.end(),
                reset_tfunc_times_.begin());
        }
    }

    // the running total can never be smaller than the reset baseline
    // (was erroneously comparing against 'reset_exec_total', a name
    // belonging to the exec-time counter, not this function)
    HPX_ASSERT(tfunc_total >= reset_tfunc_total);

    tfunc_total -= reset_tfunc_total;

    // scale raw timestamps to nanoseconds; cast matches the declared
    // boost::int64_t return type (was boost::uint64_t)
    return boost::int64_t(double(tfunc_total) * timestamp_scale_);
}

#if defined(HPX_HAVE_THREAD_IDLE_RATES)
///////////////////////////////////////////////////////////////////////////
template <typename Scheduler>
Expand Down
15 changes: 15 additions & 0 deletions src/runtime/threads/threadmanager.cpp
Expand Up @@ -654,6 +654,14 @@ namespace hpx { namespace threads
},
#endif
#endif
// /threads{locality#%d/total}/time/overall
// /threads{locality#%d/worker-thread%d}/time/overall
// NOTE: the counter name must be "time/overall" — "time/cumulative"
// is already registered by the existing cumulative-exec-time entry,
// and this commit's intent (docs, comments) is /threads/time/overall.
{ "time/overall",
  util::bind(&ti::get_cumulative_duration, this, -1, _1),
  util::bind(&ti::get_cumulative_duration, this,
      static_cast<std::size_t>(paths.instanceindex_), _1),
  "worker-thread", shepherd_count
},
// /threads{locality#%d/total}/count/instantaneous/all
// /threads{locality#%d/worker-thread%d}/count/instantaneous/all
{ "count/instantaneous/all",
Expand Down Expand Up @@ -1142,6 +1150,13 @@ namespace hpx { namespace threads
#endif
#endif

template <typename SchedulingPolicy>
boost::int64_t threadmanager_impl<SchedulingPolicy>::
    get_cumulative_duration(std::size_t num, bool reset)
{
    // Thin forwarder: the underlying thread pool owns the
    // per-worker-thread scheduler timing data.
    return pool_.get_cumulative_duration(num, reset);
}

#ifdef HPX_HAVE_THREAD_IDLE_RATES
///////////////////////////////////////////////////////////////////////////
template <typename SchedulingPolicy>
Expand Down

0 comments on commit 3c9d675

Please sign in to comment.