From 51e6bebe6ad5ba99e7a8db9a9a64b6184c32a6a2 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras <amanieu@gmail.com>
Date: Sat, 6 Apr 2013 17:10:03 +0200
Subject: [PATCH] Use std::size_t instead of plain size_t

---
 include/async++.h              |  1 +
 include/async++/task_base.h    |  4 ++--
 include/async++/when_all_any.h | 22 +++++++++++-----------
 src/aligned_alloc.h            |  2 +-
 src/fifo_queue.h               |  6 +++---
 src/work_steal_queue.h         | 12 ++++++------
 6 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/include/async++.h b/include/async++.h
index ecbbcd6..04ac28e 100644
--- a/include/async++.h
+++ b/include/async++.h
@@ -23,6 +23,7 @@
 
 #include <algorithm>
 #include <atomic>
+#include <cstddef>
 #include <cstdlib>
 #include <exception>
 #include <functional>
diff --git a/include/async++/task_base.h b/include/async++/task_base.h
index eba7900..3b0b176 100644
--- a/include/async++/task_base.h
+++ b/include/async++/task_base.h
@@ -43,7 +43,7 @@ enum class dispatch_op {
 // Continuation vector optimized for single continuations. Only supports a
 // minimal set of operations.
 class continuation_vector {
-	size_t count{0};
+	std::size_t count{0};
 
 	union data_union {
 		data_union(): inline_data() {}
@@ -109,7 +109,7 @@ class continuation_vector {
 		count = 0;
 	}
 
-	size_t size() const
+	std::size_t size() const
 	{
 		return count;
 	}
diff --git a/include/async++/when_all_any.h b/include/async++/when_all_any.h
index e9947ef..cf69c67 100644
--- a/include/async++/when_all_any.h
+++ b/include/async++/when_all_any.h
@@ -31,7 +31,7 @@ template<typename T> struct when_all_state_range: public ref_count_base<when_al
 	std::vector<T> results;
 	event_task<std::vector<T>> event;
 
-	when_all_state_range(size_t count)
+	when_all_state_range(std::size_t count)
 		: ref_count_base<when_all_state_range<T>>(count), results(count) {}
 
 	// When all references are dropped, signal the event
@@ -40,7 +40,7 @@ template<typename T> struct when_all_state_range: public ref_count_base<when_al
 		event.set(std::move(results));
 	}
 
-	template<typename U> void set(size_t i, U&& u)
+	template<typename U> void set(std::size_t i, U&& u)
 	{
 		results[i] = std::forward<U>(u);
 	}
@@ -54,7 +54,7 @@ template<> struct when_all_state_range<void>: public ref_count_base<when_all_st
 	event_task<void> event;
 
-	when_all_state_range(size_t count)
+	when_all_state_range(std::size_t count)
 		: ref_count_base(count) {}
 
 	// When all references are dropped, signal the event
 	~when_all_state_range()
@@ -63,7 +63,7 @@ template<> struct when_all_state_range<void>: public ref_count_base<when_all_st
 		event.set(fake_void());
 	}
 
-	void set(size_t i, fake_void) {}
+	void set(std::size_t i, fake_void) {}
 };
 inline std::vector<task<void>> empty_range()
 {
@@ -88,25 +88,25 @@ template<typename... T> struct when_all_state_variadic: public ref_count_base<w
 };
 
 template<typename T> struct when_any_state: public ref_count_base<when_any_state<T>> {
-	typedef std::pair<size_t, T> task_type;
+	typedef std::pair<std::size_t, T> task_type;
 	event_task<task_type> event;
 
 	when_any_state(int count)
 		: ref_count_base<when_any_state<T>>(count) {}
 
-	template<typename U> void set(size_t i, U&& u)
+	template<typename U> void set(std::size_t i, U&& u)
 	{
 		event.set(std::make_pair(i, std::forward<U>(u)));
 	}
 };
 template<> struct when_any_state<void>: public ref_count_base<when_any_state<void>> {
-	typedef size_t task_type;
+	typedef std::size_t task_type;
 	event_task<task_type> event;
 
-	when_any_state(size_t count)
+	when_any_state(std::size_t count)
 		: ref_count_base(count) {}
 
-	void set(size_t i, fake_void)
+	void set(std::size_t i, fake_void)
 	{
 		event.set(i);
 	}
@@ -189,7 +189,7 @@ template<typename Iter> task<std::vector<typename std::iterator_traits<Iter>::v
 	}
 
 	// Add a continuation to each task to set the result
-	for (size_t i = 0; begin != end; i++, ++begin) {
+	for (std::size_t i = 0; begin != end; i++, ++begin) {
 		try {
 			(*begin).then(inline_scheduler(), [state_ptr, i](task_type t) {
 				detail::ref_count_ptr<detail::when_all_state_range<result_type>> state(state_ptr);
@@ -228,7 +228,7 @@ template<typename Iter> task<typename detail::when_any_state<typename std::iter
 	auto out = state_ptr->event.get_task();
 
 	// Add a continuation to each task to set the event. First one wins.
-	for (size_t i = 0; begin != end; i++, ++begin) {
+	for (std::size_t i = 0; begin != end; i++, ++begin) {
 		try {
 			(*begin).then(inline_scheduler(), [state_ptr, i](task_type t) {
 				detail::ref_count_ptr<detail::when_any_state<result_type>> state(state_ptr);
diff --git a/src/aligned_alloc.h b/src/aligned_alloc.h
index 027a970..e826da5 100644
--- a/src/aligned_alloc.h
+++ b/src/aligned_alloc.h
@@ -32,7 +32,7 @@ namespace async {
 namespace detail {
 
 // Allocate an aligned block of memory
-inline void* aligned_alloc(size_t size, size_t align)
+inline void* aligned_alloc(std::size_t size, std::size_t align)
 {
 #ifdef _WIN32
 	void* ptr = _aligned_malloc(size, align);
diff --git a/src/fifo_queue.h b/src/fifo_queue.h
index 24298e1..dad5d6c 100644
--- a/src/fifo_queue.h
+++ b/src/fifo_queue.h
@@ -23,10 +23,10 @@ namespace detail {
 
 // Queue used to hold tasks from outside the thread pool, in FIFO order
 class fifo_queue {
-	size_t length;
+	std::size_t length;
 	std::unique_ptr<task_handle[]> items;
 	spinlock lock;
-	size_t head{0}, tail{0};
+	std::size_t head{0}, tail{0};
 
 public:
 	fifo_queue()
@@ -41,7 +41,7 @@ class fifo_queue {
 		if (head == ((tail + 1) & (length - 1))) {
 			length *= 2;
 			std::unique_ptr<task_handle[]> ptr(new task_handle[length]);
-			for (size_t i = 0; i < tail - head; i++)
+			for (std::size_t i = 0; i < tail - head; i++)
 				ptr[i] = std::move(items[(i + head) & (length - 1)]);
 			items = std::move(ptr);
 		}
diff --git a/src/work_steal_queue.h b/src/work_steal_queue.h
index e8832b2..432b722 100644
--- a/src/work_steal_queue.h
+++ b/src/work_steal_queue.h
@@ -27,10 +27,10 @@ namespace detail {
 // themselves into smaller tasks, this allows larger chunks of work to be
 // stolen.
 class work_steal_queue {
-	size_t length;
+	std::size_t length;
 	std::unique_ptr<task_handle[]> items;
 	spinlock lock;
-	std::atomic<size_t> atomic_head{0}, atomic_tail{0};
+	std::atomic<std::size_t> atomic_head{0}, atomic_tail{0};
 
 public:
 	work_steal_queue()
@@ -39,13 +39,13 @@ class work_steal_queue {
 	// Push a task to the tail of this thread's queue
 	void push(task_handle t)
 	{
-		size_t tail = atomic_tail.load(std::memory_order_relaxed);
+		std::size_t tail = atomic_tail.load(std::memory_order_relaxed);
 
 		// Check if we have space to insert an element at the tail
 		if (tail == length) {
 			// Lock the queue
 			std::lock_guard<spinlock> locked(lock);
-			size_t head = atomic_head.load(std::memory_order_relaxed);
+			std::size_t head = atomic_head.load(std::memory_order_relaxed);
 
 			// Resize the queue if it is more than 75% full
 			if (head <= length / 4) {
@@ -69,7 +69,7 @@ class work_steal_queue {
 	// Pop a task from the tail of this thread's queue
 	task_handle pop()
 	{
-		size_t tail = atomic_tail.load(std::memory_order_relaxed);
+		std::size_t tail = atomic_tail.load(std::memory_order_relaxed);
 
 		// Early exit if queue is empty
 		if (atomic_head.load(std::memory_order_relaxed) >= tail)
@@ -102,7 +102,7 @@ class work_steal_queue {
 		std::lock_guard<spinlock> locked(lock);
 
 		// Make sure head is stored before we read tail
-		size_t head = atomic_head.load(std::memory_order_relaxed);
+		std::size_t head = atomic_head.load(std::memory_order_relaxed);
 		atomic_head.store(head + 1, std::memory_order_relaxed);
 		std::atomic_thread_fence(std::memory_order_seq_cst);
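
Why the qualified name matters: the C++ standard only guarantees that <cstddef> declares std::size_t inside namespace std; whether the unqualified global name ::size_t is also visible after including the C++ headers is left to the implementation. Writing std::size_t everywhere and including <cstddef> directly, as this patch does, is the portable spelling. A minimal sketch of the distinction, using a hypothetical count_chars function that is not part of async++:

    #include <cstddef>  // guaranteed to declare std::size_t in namespace std

    // Portable: uses the qualified name guaranteed by <cstddef>
    std::size_t count_chars(const char* s)
    {
        std::size_t n = 0;
        while (s[n] != '\0')
            n++;
        return n;
    }

    // Not guaranteed to compile on every implementation: plain size_t
    // relies on <cstddef> also injecting size_t into the global
    // namespace, which the standard permits but does not require.
    // size_t count_chars(const char* s);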