Changing mask_type to dynamic_bitset (work on #721)
sithhell committed Apr 12, 2013
1 parent 465ec6a commit a17ce0a
Showing 21 changed files with 103 additions and 910 deletions.
26 changes: 9 additions & 17 deletions hpx/runtime/threads/policies/abp_priority_queue_scheduler.hpp
@@ -107,7 +107,7 @@ namespace hpx { namespace threads { namespace policies

bool numa_sensitive() const { return numa_sensitive_; }

- std::size_t get_pu_mask(topology const& topology, std::size_t num_thread) const
+ threads::mask_type get_pu_mask(topology const& topology, std::size_t num_thread) const
{
return topology.get_thread_affinity_mask(num_thread, numa_sensitive_);
}
@@ -162,19 +162,12 @@ namespace hpx { namespace threads { namespace policies
{
// try to figure out the NUMA node where the data lives
if (numa_sensitive_ && std::size_t(-1) == num_thread) {
- boost::uint64_t mask = 0;
+ threads::mask_type mask;
#if HPX_THREAD_MAINTAIN_TARGET_ADDRESS
mask = topology_.get_thread_affinity_mask_from_lva(data.lva);
#endif
- if (mask) {
- std::size_t m = 0x01LL;
- for (std::size_t i = 0; i < queues_.size(); m <<= 1, ++i)
- {
- if (!(m & mask))
- continue;
- num_thread = i;
- break;
- }
+ if (mask.any()) {
+ num_thread = mask.find_first();
}
}
if (std::size_t(-1) == num_thread)
@@ -551,16 +544,15 @@ namespace hpx { namespace threads { namespace policies

// steal work items: first try to steal from other cores in the
// same NUMA node
- boost::uint64_t core_mask
+ threads::mask_type core_mask
= topology_.get_thread_affinity_mask(num_thread, numa_sensitive_);
- boost::uint64_t node_mask
+ threads::mask_type node_mask
= topology_.get_numa_node_affinity_mask(num_thread, numa_sensitive_);

- if (core_mask && node_mask) {
- boost::uint64_t m = 0x01LL;
- for (std::size_t i = 1; i < queues_size; m <<= 1, ++i)
+ if (core_mask.any() && node_mask.any()) {
+ for (std::size_t i = 1; i < queues_size; ++i)
{
- if (i == num_thread || !(m & node_mask))
+ if (i == num_thread || !(node_mask.test(i)))
continue; // don't steal from ourselves

result = queues_[num_thread]->steal_new_or_terminate(
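The pattern above repeats across every scheduler in this commit: a boost::uint64_t mask probed with a shifted one-bit word becomes a threads::mask_type (now a boost::dynamic_bitset) queried through any(), test(), and find_first(). A minimal standalone sketch of the new idiom, assuming only Boost — the mask width and PU numbers are illustrative, not HPX's:

```cpp
// Sketch: the dynamic_bitset idiom this commit adopts. Unlike a raw
// 64-bit word, the bitset is not capped at 64 processing units.
#include <boost/dynamic_bitset.hpp>
#include <cstddef>
#include <iostream>

typedef boost::dynamic_bitset<> mask_type;

int main()
{
    mask_type mask(128);   // room for 128 PUs; impossible with boost::uint64_t
    mask.set(5);
    mask.set(70);

    if (mask.any())        // replaces: if (mask)
    {
        // replaces the "m = 0x01; m <<= 1" scan for the first set bit
        std::size_t first = mask.find_first();
        std::cout << "first set PU: " << first << '\n';   // prints 5
    }

    if (mask.test(70))     // replaces: m & mask, with m a shifted probe
        std::cout << "PU 70 is set\n";

    return 0;
}
```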
13 changes: 6 additions & 7 deletions hpx/runtime/threads/policies/abp_queue_scheduler.hpp
@@ -88,7 +88,7 @@ struct abp_queue_scheduler : boost::noncopyable

bool numa_sensitive() const { return numa_sensitive_; }

- std::size_t get_pu_mask(topology const& topology, std::size_t num_thread) const
+ threads::mask_type get_pu_mask(topology const& topology, std::size_t num_thread) const
{
return topology.get_thread_affinity_mask(num_thread, numa_sensitive_);
}
@@ -291,17 +291,16 @@ struct abp_queue_scheduler : boost::noncopyable

// steal work items: first try to steal from other cores in the
// same NUMA node
- boost::uint64_t core_mask
+ threads::mask_type core_mask
= topology_.get_thread_affinity_mask(num_thread, numa_sensitive_);
- boost::uint64_t node_mask
+ threads::mask_type node_mask
= topology_.get_numa_node_affinity_mask(num_thread, numa_sensitive_);

std::size_t queue_size = queues_.size();
- if (core_mask && node_mask) {
- boost::uint64_t m = 0x01LL;
- for (std::size_t i = 1; i < queue_size; m <<= 1, ++i)
+ if (core_mask.any() && node_mask.any()) {
+ for (std::size_t i = 1; i < queue_size; ++i)
{
- if (i == num_thread || !(m & node_mask))
+ if (i == num_thread || !(node_mask.test(i)))
continue; // don't steal from ourselves

result = queues_[num_thread]->steal_new_or_terminate(
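The stealing loops in both ABP schedulers still visit every queue index and call node_mask.test(i). dynamic_bitset also supports walking only the set bits via find_first()/find_next(); a sketch of that alternative, with the mask contents invented for illustration:

```cpp
// Sketch: iterate only the set bits of a dynamic_bitset instead of
// testing every index.
#include <boost/dynamic_bitset.hpp>
#include <iostream>

typedef boost::dynamic_bitset<> mask_type;

int main()
{
    mask_type node_mask(16);
    node_mask.set(2);
    node_mask.set(3);
    node_mask.set(9);

    for (mask_type::size_type i = node_mask.find_first();
         i != mask_type::npos;            // npos: no further set bits
         i = node_mask.find_next(i))
    {
        std::cout << "candidate queue: " << i << '\n';   // prints 2, 3, 9
    }
    return 0;
}
```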
2 changes: 1 addition & 1 deletion hpx/runtime/threads/policies/global_queue_scheduler.hpp
@@ -50,7 +50,7 @@ namespace hpx { namespace threads { namespace policies

bool numa_sensitive() const { return false; }

- std::size_t get_pu_mask(topology const& topology, std::size_t num_thread) const
+ threads::mask_type get_pu_mask(topology const& topology, std::size_t num_thread) const
{
return topology.get_thread_affinity_mask(num_thread, false);
}
4 changes: 2 additions & 2 deletions hpx/runtime/threads/policies/hierarchy_scheduler.hpp
@@ -174,7 +174,7 @@ namespace hpx { namespace threads { namespace policies

bool numa_sensitive() const { return numa_sensitive_; }

- std::size_t get_pu_mask(topology const& topology, std::size_t num_thread) const
+ threads::mask_type get_pu_mask(topology const& topology, std::size_t num_thread) const
{
return topology.get_thread_affinity_mask(num_thread, numa_sensitive_);
}
@@ -184,7 +184,7 @@
return num_thread;
}

- std::size_t get_num_stolen_threads(std::size_t num_thread, bbool reset) const
+ std::size_t get_num_stolen_threads(std::size_t num_thread, bool reset) const
{
return 0;
}
6 changes: 3 additions & 3 deletions hpx/runtime/threads/policies/hwloc_topology.hpp
@@ -71,12 +71,12 @@ namespace hpx { namespace threads

void set_thread_affinity_mask(
boost::thread&
- , mask_type //mask
+ , mask_type const & //mask
, error_code& ec = throws
) const;

void set_thread_affinity_mask(
- mask_type mask
+ mask_type const & mask
, error_code& ec = throws
) const;

@@ -93,7 +93,7 @@ namespace hpx { namespace threads
std::size_t num_numa_node
) const;
mask_type init_core_affinity_mask_from_core(
- std::size_t num_core, mask_type default_mask
+ std::size_t num_core, mask_type const & default_mask
) const;
mask_type init_thread_affinity_mask(std::size_t num_thread) const;

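Two things happen around the topology code: mask_type is now passed by const reference, since a dynamic_bitset owns heap storage and copying it allocates, and the platform-specific linux_topology.hpp is deleted in favour of the hwloc backend. hwloc itself consumes an hwloc_bitmap_t rather than a dynamic_bitset, so any implementation has to translate set bits across. The function below is a hedged sketch of that translation — an assumption about how such a binding could look, not the body of HPX's set_thread_affinity_mask:

```cpp
// Sketch: translate a dynamic_bitset mask into an hwloc cpuset and bind
// the calling thread. Error handling elided; illustrative only.
#include <boost/dynamic_bitset.hpp>
#include <hwloc.h>

typedef boost::dynamic_bitset<> mask_type;

void bind_current_thread(hwloc_topology_t topo, mask_type const& mask)
{
    hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
    for (mask_type::size_type i = mask.find_first();
         i != mask_type::npos;
         i = mask.find_next(i))
    {
        hwloc_bitmap_set(cpuset, static_cast<unsigned>(i));  // copy each set bit
    }
    hwloc_set_cpubind(topo, cpuset, HWLOC_CPUBIND_THREAD);
    hwloc_bitmap_free(cpuset);
}
```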
176 changes: 0 additions & 176 deletions hpx/runtime/threads/policies/linux_topology.hpp

This file was deleted.

22 changes: 7 additions & 15 deletions hpx/runtime/threads/policies/local_priority_queue_scheduler.hpp
@@ -126,7 +126,7 @@ namespace hpx { namespace threads { namespace policies

bool numa_sensitive() const { return numa_sensitive_; }

- std::size_t get_pu_mask(topology const& topology, std::size_t num_thread) const
+ threads::mask_type get_pu_mask(topology const& topology, std::size_t num_thread) const
{
return affinity_data_.get_pu_mask(topology, num_thread, numa_sensitive_);
}
@@ -201,19 +201,12 @@ namespace hpx { namespace threads { namespace policies
{
// try to figure out the NUMA node where the data lives
if (numa_sensitive_ && std::size_t(-1) == num_thread) {
- boost::uint64_t mask = 0;
+ threads::mask_type mask;
#if HPX_THREAD_MAINTAIN_TARGET_ADDRESS
mask = topology_.get_thread_affinity_mask_from_lva(data.lva);
#endif
- if (mask) {
- std::size_t m = 0x01LL;
- for (std::size_t i = 0; i < queues_.size(); m <<= 1, ++i)
- {
- if (!(m & mask))
- continue;
- num_thread = i;
- break;
- }
+ if (mask.any()) {
+ num_thread = mask.find_first();
}
}
if (std::size_t(-1) == num_thread)
@@ -608,11 +601,10 @@ namespace hpx { namespace threads { namespace policies
mask_type node_mask =
topology_.get_numa_node_affinity_mask(num_pu, numa_sensitive_);

- if (core_mask && node_mask) {
- boost::uint64_t m = 0x01LL;
- for (std::size_t i = 0; i < queues_size; m <<= 1, ++i)
+ if (core_mask.any() && node_mask.any()) {
+ for (std::size_t i = 0; i < queues_size; ++i)
{
- if (i == num_thread || !(m & node_mask))
+ if (i == num_thread || !(node_mask.test(i)))
continue; // don't steal from ourselves

result = queues_[num_thread]->wait_or_add_new(i,
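One subtlety in the schedule_thread hunk above: threads::mask_type mask; is default-constructed, and a default-constructed dynamic_bitset has size zero, so when HPX_THREAD_MAINTAIN_TARGET_ADDRESS is off, mask.any() is false and the code falls through exactly as the old boost::uint64_t mask = 0; did. A tiny check of that behaviour:

```cpp
#include <boost/dynamic_bitset.hpp>
#include <cassert>

int main()
{
    boost::dynamic_bitset<> mask;   // default-constructed: size() == 0
    assert(mask.size() == 0);
    assert(!mask.any());            // same fall-through as uint64_t mask = 0
    return 0;
}
```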
11 changes: 5 additions & 6 deletions hpx/runtime/threads/policies/local_queue_scheduler.hpp
@@ -105,7 +105,7 @@ namespace hpx { namespace threads { namespace policies

bool numa_sensitive() const { return numa_sensitive_; }

- std::size_t get_pu_mask(topology const& topology, std::size_t num_thread) const
+ threads::mask_type get_pu_mask(topology const& topology, std::size_t num_thread) const
{
return affinity_data_.get_pu_mask(topology, num_thread, numa_sensitive_);
}
@@ -320,15 +320,14 @@ namespace hpx { namespace threads { namespace policies
mask_type node_mask = topology_.get_numa_node_affinity_mask(
num_thread, numa_sensitive_);

- if (core_mask && node_mask) {
- mask_type m = 0x01LL;
+ if (core_mask.any() && node_mask.any()) {
for (std::size_t i = 0; (0 == added) && i < queues_.size();
- m <<= 1, ++i)
+ ++i)
{
- if (m == core_mask || !(m & node_mask))
+ if (core_mask.test(i) || !(node_mask.test(i)))
continue; // don't steal from ourselves

- std::size_t idx = least_significant_bit_set(m);
+ std::size_t idx = i;
BOOST_ASSERT(idx < queues_.size());

result = queues_[num_thread]->wait_or_add_new(idx,
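This last hunk needs more than a type swap because the single-bit probe m is removed entirely, so the new lines must be expressed in terms of the loop index i. For m = 1 << i, (m & node_mask) is node_mask.test(i) and least_significant_bit_set(m) is simply i, which is what the rewritten lines above say (least_significant_bit_set is the helper the old code used). The check below is illustrative, not HPX code:

```cpp
// Sketch: for a single-bit probe m = 1ull << i, the old word-mask test
// and the new dynamic_bitset test agree bit for bit.
#include <boost/cstdint.hpp>
#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <cstddef>

int main()
{
    boost::uint64_t old_mask = (1ull << 7) | (1ull << 42);
    boost::dynamic_bitset<> new_mask(64);
    new_mask.set(7);
    new_mask.set(42);

    for (std::size_t i = 0; i < 64; ++i)
    {
        boost::uint64_t m = 1ull << i;                    // old-style probe
        assert(bool(m & old_mask) == new_mask.test(i));   // same answer
    }
    return 0;
}
```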
