Use std::size_t instead of plain size_t

Amanieu committed Apr 6, 2013
1 parent f3ea31c commit 51e6bebe6ad5ba99e7a8db9a9a64b6184c32a6a2
Showing with 24 additions and 23 deletions.
  1. +1 −0 include/async++.h
  2. +2 −2 include/async++/task_base.h
  3. +11 −11 include/async++/when_all_any.h
  4. +1 −1 src/aligned_alloc.h
  5. +3 −3 src/fifo_queue.h
  6. +6 −6 src/work_steal_queue.h
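
The change is mechanical, but the reason for it is a portability detail of the standard headers: the C++ <c...> headers such as <cstdlib> and <cstddef> are only required to declare std::size_t inside namespace std, and whether the unqualified global size_t is also visible is left unspecified. That is also why the first hunk adds #include <cstdlib> to the umbrella header. A minimal, standalone illustration of the qualified spelling (not code from this commit):

#include <cstddef>  // guarantees std::size_t; plain ::size_t is not guaranteed here
#include <vector>

// Count the positive elements of a vector using the fully qualified std::size_t.
std::size_t count_positive(const std::vector<int>& v)
{
    std::size_t n = 0;
    for (int x : v)
        if (x > 0)
            ++n;
    return n;
}
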
include/async++.h
@@ -23,6 +23,7 @@
#include <algorithm>
#include <atomic>
+#include <cstdlib>
#include <exception>
#include <memory>
#include <mutex>
include/async++/task_base.h
@@ -43,7 +43,7 @@ enum class dispatch_op {
// Continuation vector optimized for single continuations. Only supports a
// minimal set of operations.
class continuation_vector {
- size_t count{0};
+ std::size_t count{0};
union data_union {
data_union(): inline_data() {}
@@ -109,7 +109,7 @@ class continuation_vector {
count = 0;
}
- size_t size() const
+ std::size_t size() const
{
return count;
}
include/async++/when_all_any.h
@@ -31,7 +31,7 @@ template<typename T> struct when_all_state_range: public ref_count_base<when_all
task_type results;
event_task<task_type> event;
- when_all_state_range(size_t count)
+ when_all_state_range(std::size_t count)
: ref_count_base<when_all_state_range<T>>(count), results(count) {}
// When all references are dropped, signal the event
@@ -40,7 +40,7 @@ template<typename T> struct when_all_state_range: public ref_count_base<when_all
event.set(std::move(results));
}
- template<typename U> void set(size_t i, U&& u)
+ template<typename U> void set(std::size_t i, U&& u)
{
results[i] = std::forward<U>(u);
}
@@ -54,7 +54,7 @@ template<> struct when_all_state_range<void>: public ref_count_base<when_all_sta
typedef void task_type;
event_task<void> event;
- when_all_state_range(size_t count)
+ when_all_state_range(std::size_t count)
: ref_count_base(count) {}
// When all references are dropped, signal the event
@@ -63,7 +63,7 @@ template<> struct when_all_state_range<void>: public ref_count_base<when_all_sta
event.set();
}
- void set(size_t, fake_void) {}
+ void set(std::size_t, fake_void) {}
static task<task_type> empty_range()
{
@@ -88,25 +88,25 @@ template<typename Tuple> struct when_all_state_variadic: public ref_count_base<w
// when_any shared state
template<typename T> struct when_any_state: public ref_count_base<when_any_state<T>> {
- typedef std::pair<size_t, T> task_type;
+ typedef std::pair<std::size_t, T> task_type;
event_task<task_type> event;
when_any_state(int count)
: ref_count_base<when_any_state<T>>(count) {}
- template<typename U> void set(size_t i, U&& u)
+ template<typename U> void set(std::size_t i, U&& u)
{
event.set(std::make_pair(i, std::forward<U>(u)));
}
};
template<> struct when_any_state<void>: public ref_count_base<when_any_state<void>> {
- typedef size_t task_type;
+ typedef std::size_t task_type;
event_task<task_type> event;
- when_any_state(size_t count)
+ when_any_state(std::size_t count)
: ref_count_base(count) {}
- void set(size_t i, fake_void)
+ void set(std::size_t i, fake_void)
{
event.set(i);
}
@@ -189,7 +189,7 @@ template<typename Iter> task<typename detail::when_all_state_range<typename std:
// Add a continuation to each task to add its result to the shared state
// Last task sets the event result
- for (size_t i = 0; begin != end; i++, ++begin) {
+ for (std::size_t i = 0; begin != end; i++, ++begin) {
try {
(*begin).then(inline_scheduler(), [state_ptr, i](task_type t) {
detail::ref_count_ptr<detail::when_all_state_range<result_type>> state(state_ptr);
@@ -228,7 +228,7 @@ template<typename Iter> task<typename detail::when_any_state<typename std::itera
auto out = state_ptr->event.get_task();
// Add a continuation to each task to set the event. First one wins.
- for (size_t i = 0; begin != end; i++, ++begin) {
+ for (std::size_t i = 0; begin != end; i++, ++begin) {
try {
(*begin).then(inline_scheduler(), [state_ptr, i](task_type t) {
detail::ref_count_ptr<detail::when_any_state<result_type>> state(state_ptr);
src/aligned_alloc.h
@@ -32,7 +32,7 @@ namespace async {
namespace detail {
// Allocate an aligned block of memory
-inline void* aligned_alloc(size_t size, size_t align)
+inline void* aligned_alloc(std::size_t size, std::size_t align)
{
#ifdef _WIN32
void* ptr = _aligned_malloc(size, align);
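
Only the Windows branch of aligned_alloc is visible in the hunk above. As a hedged sketch of how such a helper is typically completed on POSIX systems (hypothetical; the library's actual non-Windows branch and error handling may differ), posix_memalign is the usual counterpart to _aligned_malloc:

#include <cstddef>
#include <new>
#ifdef _WIN32
#include <malloc.h>   // _aligned_malloc
#else
#include <stdlib.h>   // posix_memalign
#endif

// Allocate size bytes aligned to align, throwing on failure.
// Assumes align is a power of two and a multiple of sizeof(void*).
inline void* aligned_alloc_sketch(std::size_t size, std::size_t align)
{
#ifdef _WIN32
    void* ptr = _aligned_malloc(size, align);
    if (!ptr)
        throw std::bad_alloc();
    return ptr;
#else
    void* ptr;
    if (posix_memalign(&ptr, align, size) != 0)
        throw std::bad_alloc();
    return ptr;
#endif
}
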
src/fifo_queue.h
@@ -23,10 +23,10 @@ namespace detail {
// Queue used to hold tasks from outside the thread pool, in FIFO order
class fifo_queue {
- size_t length;
+ std::size_t length;
std::unique_ptr<task_handle[]> items;
spinlock lock;
- size_t head{0}, tail{0};
+ std::size_t head{0}, tail{0};
public:
fifo_queue()
@@ -41,7 +41,7 @@ class fifo_queue {
if (head == ((tail + 1) & (length - 1))) {
length *= 2;
std::unique_ptr<task_handle[]> ptr(new task_handle[length]);
- for (size_t i = 0; i < tail - head; i++)
+ for (std::size_t i = 0; i < tail - head; i++)
ptr[i] = std::move(items[(i + head) & (length - 1)]);
items = std::move(ptr);
}
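
The resize loop above relies on the queue capacity always being a power of two, so that masking with (length - 1) wraps an index exactly as a modulo by length would, without a division. A tiny standalone illustration of that masking identity (hypothetical names, not code from this repository):

#include <cstddef>

// Wrap an index into a ring buffer whose capacity is a power of two.
// For such capacities, i & (capacity - 1) == i % capacity.
inline std::size_t wrap_index(std::size_t i, std::size_t capacity)
{
    return i & (capacity - 1);
}

// Example: with capacity 8, indices 7, 8 and 9 wrap to 7, 0 and 1.
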
src/work_steal_queue.h
@@ -27,10 +27,10 @@ namespace detail {
// themselves into smaller tasks, this allows larger chunks of work to be
// stolen.
class work_steal_queue {
- size_t length;
+ std::size_t length;
std::unique_ptr<task_handle[]> items;
spinlock lock;
- std::atomic<size_t> atomic_head{0}, atomic_tail{0};
+ std::atomic<std::size_t> atomic_head{0}, atomic_tail{0};
public:
work_steal_queue()
@@ -39,13 +39,13 @@ class work_steal_queue {
// Push a task to the tail of this thread's queue
void push(task_handle t)
{
- size_t tail = atomic_tail.load(std::memory_order_relaxed);
+ std::size_t tail = atomic_tail.load(std::memory_order_relaxed);
// Check if we have space to insert an element at the tail
if (tail == length) {
// Lock the queue
std::lock_guard<spinlock> locked(lock);
- size_t head = atomic_head.load(std::memory_order_relaxed);
+ std::size_t head = atomic_head.load(std::memory_order_relaxed);
// Resize the queue if it is more than 75% full
if (head <= length / 4) {
@@ -69,7 +69,7 @@ class work_steal_queue {
// Pop a task from the tail of this thread's queue
task_handle pop()
{
- size_t tail = atomic_tail.load(std::memory_order_relaxed);
+ std::size_t tail = atomic_tail.load(std::memory_order_relaxed);
// Early exit if queue is empty
if (atomic_head.load(std::memory_order_relaxed) >= tail)
@@ -102,7 +102,7 @@ class work_steal_queue {
std::lock_guard<spinlock> locked(lock);
// Make sure head is stored before we read tail
- size_t head = atomic_head.load(std::memory_order_relaxed);
+ std::size_t head = atomic_head.load(std::memory_order_relaxed);
atomic_head.store(head + 1, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_seq_cst);
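
The steal path shown last pairs relaxed operations on std::atomic<std::size_t> indices with a sequentially consistent fence: the thief publishes its head increment before re-reading tail, so the thief and the lock-free owner cannot both claim the last remaining element. A heavily simplified, hypothetical sketch of just that ordering pattern (the owner's pop side, which needs a matching fence between its tail update and its head read, is not shown):

#include <atomic>
#include <cstddef>
#include <mutex>

std::mutex steal_lock;                      // serializes competing thieves
std::atomic<std::size_t> head{0}, tail{0};  // queue indices

// Try to reserve one element from the head on behalf of a thief.
bool try_steal_slot()
{
    std::lock_guard<std::mutex> locked(steal_lock);
    std::size_t h = head.load(std::memory_order_relaxed);
    head.store(h + 1, std::memory_order_relaxed);

    // Make the head increment globally visible before tail is read.
    std::atomic_thread_fence(std::memory_order_seq_cst);

    if (h >= tail.load(std::memory_order_relaxed)) {
        head.store(h, std::memory_order_relaxed);  // queue was empty: undo
        return false;
    }
    return true;  // slot h now belongs to this thief
}
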
