Use std::size_t instead of plain size_t
Amanieu committed Apr 6, 2013
1 parent f3ea31c commit 51e6beb
Showing 6 changed files with 24 additions and 23 deletions.
1 change: 1 addition & 0 deletions include/async++.h
@@ -23,6 +23,7 @@

#include <algorithm>
#include <atomic>
+#include <cstdlib>
#include <exception>
#include <memory>
#include <mutex>
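
The new #include <cstdlib> is needed because the C++ standard only guarantees that the <c...> headers declare their names inside namespace std; whether an unqualified ::size_t is also visible at global scope is left to the implementation, which is exactly the portability gap this commit closes. A minimal sketch of the point (not part of the diff):

```cpp
// Sketch only: <cstdlib> must declare std::size_t, but it is unspecified
// whether it also provides an unqualified ::size_t at global scope.
#include <cstdlib>

std::size_t a = sizeof(int);   // always well-formed after including <cstdlib>
// size_t b = sizeof(int);     // may fail on a strictly conforming library
```
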
4 changes: 2 additions & 2 deletions include/async++/task_base.h
@@ -43,7 +43,7 @@ enum class dispatch_op {
// Continuation vector optimized for single continuations. Only supports a
// minimal set of operations.
class continuation_vector {
-size_t count{0};
+std::size_t count{0};

union data_union {
data_union(): inline_data() {}
@@ -109,7 +109,7 @@ class continuation_vector {
count = 0;
}

-size_t size() const
+std::size_t size() const
{
return count;
}
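
The count member and size() accessor above now use std::size_t throughout. As a rough illustration of the single-continuation optimization hinted at by data_union (a reduced sketch for illustration, not the library's actual continuation_vector):

```cpp
// Reduced small-buffer sketch: one element lives inline, and a second push
// spills everything to a heap-allocated vector. Counts are std::size_t.
#include <cstddef>
#include <vector>

template<typename T>
class small_buffer {
	std::size_t count{0};
	T inline_item{};          // active while count <= 1
	std::vector<T> overflow;  // used once count > 1

public:
	void push(T item)
	{
		if (count == 0) {
			inline_item = item;
		} else {
			if (count == 1)
				overflow.push_back(inline_item);
			overflow.push_back(item);
		}
		count++;
	}

	std::size_t size() const { return count; }
};
```
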
22 changes: 11 additions & 11 deletions include/async++/when_all_any.h
@@ -31,7 +31,7 @@ template<typename T> struct when_all_state_range: public ref_count_base<when_all
task_type results;
event_task<task_type> event;

-when_all_state_range(size_t count)
+when_all_state_range(std::size_t count)
: ref_count_base<when_all_state_range<T>>(count), results(count) {}

// When all references are dropped, signal the event
@@ -40,7 +40,7 @@ template<typename T> struct when_all_state_range: public ref_count_base<when_all
event.set(std::move(results));
}

-template<typename U> void set(size_t i, U&& u)
+template<typename U> void set(std::size_t i, U&& u)
{
results[i] = std::forward<U>(u);
}
@@ -54,7 +54,7 @@ template<> struct when_all_state_range<void>: public ref_count_base<when_all_sta
typedef void task_type;
event_task<void> event;

-when_all_state_range(size_t count)
+when_all_state_range(std::size_t count)
: ref_count_base(count) {}

// When all references are dropped, signal the event
@@ -63,7 +63,7 @@ template<> struct when_all_state_range<void>: public ref_count_base<when_all_sta
event.set();
}

-void set(size_t, fake_void) {}
+void set(std::size_t, fake_void) {}

static task<task_type> empty_range()
{
@@ -88,25 +88,25 @@ template<typename Tuple> struct when_all_state_variadic: public ref_count_base<w

// when_any shared state
template<typename T> struct when_any_state: public ref_count_base<when_any_state<T>> {
-typedef std::pair<size_t, T> task_type;
+typedef std::pair<std::size_t, T> task_type;
event_task<task_type> event;

when_any_state(int count)
: ref_count_base<when_any_state<T>>(count) {}

-template<typename U> void set(size_t i, U&& u)
+template<typename U> void set(std::size_t i, U&& u)
{
event.set(std::make_pair(i, std::forward<U>(u)));
}
};
template<> struct when_any_state<void>: public ref_count_base<when_any_state<void>> {
-typedef size_t task_type;
+typedef std::size_t task_type;
event_task<task_type> event;

-when_any_state(size_t count)
+when_any_state(std::size_t count)
: ref_count_base(count) {}

-void set(size_t i, fake_void)
+void set(std::size_t i, fake_void)
{
event.set(i);
}
@@ -189,7 +189,7 @@ template<typename Iter> task<typename detail::when_all_state_range<typename std:

// Add a continuation to each task to add its result to the shared state
// Last task sets the event result
-for (size_t i = 0; begin != end; i++, ++begin) {
+for (std::size_t i = 0; begin != end; i++, ++begin) {
try {
(*begin).then(inline_scheduler(), [state_ptr, i](task_type t) {
detail::ref_count_ptr<detail::when_all_state_range<result_type>> state(state_ptr);
@@ -228,7 +228,7 @@ template<typename Iter> task<typename detail::when_any_state<typename std::itera
auto out = state_ptr->event.get_task();

// Add a continuation to each task to set the event. First one wins.
-for (size_t i = 0; begin != end; i++, ++begin) {
+for (std::size_t i = 0; begin != end; i++, ++begin) {
try {
(*begin).then(inline_scheduler(), [state_ptr, i](task_type t) {
detail::ref_count_ptr<detail::when_any_state<result_type>> state(state_ptr);
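
The std::size_t change in when_any_state is user-visible: per the typedef above, when_any over a range reports the index of the first finished task as the std::size_t half of a pair. A usage sketch, assuming async::spawn and the range overload of async::when_any defined in this file (the exact result details are an assumption, not documented by this diff):

```cpp
#include <async++.h>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
	std::vector<async::task<int>> tasks;
	for (int i = 0; i < 4; i++)
		tasks.push_back(async::spawn([i] { return i * i; }));

	// The std::size_t half of the result pair identifies which task won.
	auto winner = async::when_any(tasks.begin(), tasks.end()).get();
	std::size_t index = winner.first;
	std::cout << "task " << index << " finished first\n";
}
```
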
2 changes: 1 addition & 1 deletion src/aligned_alloc.h
@@ -32,7 +32,7 @@ namespace async {
namespace detail {

// Allocate an aligned block of memory
-inline void* aligned_alloc(size_t size, size_t align)
+inline void* aligned_alloc(std::size_t size, std::size_t align)
{
#ifdef _WIN32
void* ptr = _aligned_malloc(size, align);
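
Only the _WIN32 branch of aligned_alloc appears in this hunk. For reference, a POSIX counterpart typically looks like the sketch below (an assumption for illustration; the actual #else branch is not shown in the diff):

```cpp
// POSIX-style aligned allocation sketch; align must be a power of two
// and a multiple of sizeof(void*), as posix_memalign requires.
#include <cstddef>
#include <cstdlib>
#include <new>

inline void* aligned_alloc_posix(std::size_t size, std::size_t align)
{
	void* ptr;
	if (posix_memalign(&ptr, align, size) != 0)
		throw std::bad_alloc();
	return ptr;
}
```
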
6 changes: 3 additions & 3 deletions src/fifo_queue.h
@@ -23,10 +23,10 @@ namespace detail {

// Queue used to hold tasks from outside the thread pool, in FIFO order
class fifo_queue {
-size_t length;
+std::size_t length;
std::unique_ptr<task_handle[]> items;
spinlock lock;
-size_t head{0}, tail{0};
+std::size_t head{0}, tail{0};

public:
fifo_queue()
@@ -41,7 +41,7 @@ class fifo_queue {
if (head == ((tail + 1) & (length - 1))) {
length *= 2;
std::unique_ptr<task_handle[]> ptr(new task_handle[length]);
-for (size_t i = 0; i < tail - head; i++)
+for (std::size_t i = 0; i < tail - head; i++)
ptr[i] = std::move(items[(i + head) & (length - 1)]);
items = std::move(ptr);
}
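
The resize logic above depends on the queue length being a power of two so that masking with (length - 1) wraps indices cheaply. A standalone sketch of that masking arithmetic with std::size_t indices (illustrative only, not the library's fifo_queue):

```cpp
#include <cassert>
#include <cstddef>

int main()
{
	std::size_t length = 8;     // must be a power of two
	std::size_t head = 6, tail = 6;

	// Advancing an index wraps with a mask instead of a modulo.
	for (int i = 0; i < 5; i++)
		tail = (tail + 1) & (length - 1);
	assert(tail == 3);          // 6 -> 7 -> 0 -> 1 -> 2 -> 3

	// The queue counts as full when the next tail would land on head.
	bool full = (head == ((tail + 1) & (length - 1)));
	assert(!full);
	return 0;
}
```
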
12 changes: 6 additions & 6 deletions src/work_steal_queue.h
@@ -27,10 +27,10 @@ namespace detail {
// themselves into smaller tasks, this allows larger chunks of work to be
// stolen.
class work_steal_queue {
-size_t length;
+std::size_t length;
std::unique_ptr<task_handle[]> items;
spinlock lock;
-std::atomic<size_t> atomic_head{0}, atomic_tail{0};
+std::atomic<std::size_t> atomic_head{0}, atomic_tail{0};

public:
work_steal_queue()
@@ -39,13 +39,13 @@ class work_steal_queue {
// Push a task to the tail of this thread's queue
void push(task_handle t)
{
-size_t tail = atomic_tail.load(std::memory_order_relaxed);
+std::size_t tail = atomic_tail.load(std::memory_order_relaxed);

// Check if we have space to insert an element at the tail
if (tail == length) {
// Lock the queue
std::lock_guard<spinlock> locked(lock);
-size_t head = atomic_head.load(std::memory_order_relaxed);
+std::size_t head = atomic_head.load(std::memory_order_relaxed);

// Resize the queue if it is more than 75% full
if (head <= length / 4) {
@@ -69,7 +69,7 @@
// Pop a task from the tail of this thread's queue
task_handle pop()
{
-size_t tail = atomic_tail.load(std::memory_order_relaxed);
+std::size_t tail = atomic_tail.load(std::memory_order_relaxed);

// Early exit if queue is empty
if (atomic_head.load(std::memory_order_relaxed) >= tail)
@@ -102,7 +102,7 @@
std::lock_guard<spinlock> locked(lock);

// Make sure head is stored before we read tail
-size_t head = atomic_head.load(std::memory_order_relaxed);
+std::size_t head = atomic_head.load(std::memory_order_relaxed);
atomic_head.store(head + 1, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_seq_cst);

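
The steal path above stores the incremented head with relaxed ordering and then issues a seq_cst fence before reading tail: a store-load barrier, so the owner popping from the tail and a thief stealing from the head cannot both believe they took the last item. A minimal sketch of that fence pattern (illustrative only; the real steal() also holds the spinlock shown above and returns the stolen task_handle):

```cpp
#include <atomic>
#include <cstddef>

std::atomic<std::size_t> atomic_head{0}, atomic_tail{0};

// Claim one slot from the head, backing off if the queue turned out empty.
bool try_claim_from_head()
{
	std::size_t head = atomic_head.load(std::memory_order_relaxed);
	atomic_head.store(head + 1, std::memory_order_relaxed);

	// Store-load barrier: publish the head update before reading tail.
	std::atomic_thread_fence(std::memory_order_seq_cst);

	std::size_t tail = atomic_tail.load(std::memory_order_relaxed);
	if (head >= tail) {
		// Queue was empty after all; undo the claim.
		atomic_head.store(head, std::memory_order_relaxed);
		return false;
	}
	return true;
}
```
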
