Export of internal Abseil changes
--
972333fe1e43427849b8a634aa35061e81be3642 by Abseil Team <absl-team@google.com>:

Replace deprecated thread annotation macros.

PiperOrigin-RevId: 267332619
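
As a minimal sketch of the renaming (assuming absl/base/thread_annotations.h and absl/synchronization/mutex.h; the names stats_mu, pending_count, and the functions are hypothetical, not from this commit), each deprecated macro gains an ABSL_-prefixed spelling:

// Hypothetical sketch: the ABSL_-prefixed annotations that replace the
// deprecated unprefixed macros (GUARDED_BY -> ABSL_GUARDED_BY, etc.).
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

absl::Mutex stats_mu;
int pending_count ABSL_GUARDED_BY(stats_mu) = 0;  // was: GUARDED_BY(stats_mu)

// Caller must hold stats_mu (was: EXCLUSIVE_LOCKS_REQUIRED(stats_mu)).
void IncrementPending() ABSL_EXCLUSIVE_LOCKS_REQUIRED(stats_mu) {
  ++pending_count;
}

void RecordPending() {
  absl::MutexLock lock(&stats_mu);  // satisfies the annotation above
  IncrementPending();
}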

--
7039c6dc499a31c372b4872eda0772455931c360 by Gennadiy Rozental <rogeeff@google.com>:

Internal change

PiperOrigin-RevId: 267220271

--
a3f524d2afc2535686f206a7ce06961016349d7a by Abseil Team <absl-team@google.com>:

Factor kernel_timeout out of synchronization.

PiperOrigin-RevId: 267217304
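
The refactored header itself is not part of this diff; as a rough, purely illustrative sketch of the idea (all names here are hypothetical, not the actual absl/synchronization/internal API), a kernel-timeout helper carries an optional absl::Time deadline and converts it to the absolute timespec that kernel-level waits expect:

// Hypothetical sketch of a kernel-timeout abstraction; illustrative only.
#include <ctime>
#include "absl/time/time.h"

class SketchKernelTimeout {
 public:
  static SketchKernelTimeout Never() { return SketchKernelTimeout(); }
  explicit SketchKernelTimeout(absl::Time deadline)
      : has_timeout_(true), deadline_(deadline) {}

  bool has_timeout() const { return has_timeout_; }

  // Absolute timespec suitable for CLOCK_REALTIME-based waits.
  timespec MakeAbsTimespec() const { return absl::ToTimespec(deadline_); }

 private:
  SketchKernelTimeout() = default;
  bool has_timeout_ = false;
  absl::Time deadline_ = absl::InfiniteFuture();
};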

--
90287de4114ef9a06cafe50256a2d03349772c21 by Abseil Team <absl-team@google.com>:

Fixed comment typo.

PiperOrigin-RevId: 267198532

--
d312c1a1e52aeca1871ff0deead416d09a7f237e by Gennadiy Rozental <rogeeff@google.com>:

Internal change

PiperOrigin-RevId: 267185804
GitOrigin-RevId: 972333fe1e43427849b8a634aa35061e81be3642
Change-Id: Ia8a2f877c57cef9854aad48f1753af872fc04dc8
Abseil Team authored and rogeeff committed Sep 5, 2019
1 parent eb6b7bd commit 83c1d65
Showing 27 changed files with 282 additions and 377 deletions.
12 changes: 6 additions & 6 deletions absl/base/call_once_test.cc
@@ -30,11 +30,11 @@ absl::once_flag once;
 
 ABSL_CONST_INIT Mutex counters_mu(absl::kConstInit);
 
-int running_thread_count GUARDED_BY(counters_mu) = 0;
-int call_once_invoke_count GUARDED_BY(counters_mu) = 0;
-int call_once_finished_count GUARDED_BY(counters_mu) = 0;
-int call_once_return_count GUARDED_BY(counters_mu) = 0;
-bool done_blocking GUARDED_BY(counters_mu) = false;
+int running_thread_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_invoke_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_finished_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_return_count ABSL_GUARDED_BY(counters_mu) = 0;
+bool done_blocking ABSL_GUARDED_BY(counters_mu) = false;
 
 // Function to be called from absl::call_once. Waits for a notification.
 void WaitAndIncrement() {
@@ -60,7 +60,7 @@ void ThreadBody() {
 }
 
 // Returns true if all threads are set up for the test.
-bool ThreadsAreSetup(void*) EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
+bool ThreadsAreSetup(void*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
   // All ten threads must be running, and WaitAndIncrement should be blocked.
   return running_thread_count == 10 && call_once_invoke_count == 1;
 }
12 changes: 6 additions & 6 deletions absl/base/internal/low_level_alloc.cc
@@ -203,9 +203,9 @@ struct LowLevelAlloc::Arena {
 
   base_internal::SpinLock mu;
   // Head of free list, sorted by address
-  AllocList freelist GUARDED_BY(mu);
+  AllocList freelist ABSL_GUARDED_BY(mu);
   // Count of allocated blocks
-  int32_t allocation_count GUARDED_BY(mu);
+  int32_t allocation_count ABSL_GUARDED_BY(mu);
   // flags passed to NewArena
   const uint32_t flags;
   // Result of sysconf(_SC_PAGESIZE)
@@ -215,7 +215,7 @@ struct LowLevelAlloc::Arena {
   // Smallest allocation block size
   const size_t min_size;
   // PRNG state
-  uint32_t random GUARDED_BY(mu);
+  uint32_t random ABSL_GUARDED_BY(mu);
 };
 
 namespace {
@@ -275,10 +275,10 @@ static const uintptr_t kMagicAllocated = 0x4c833e95U;
 static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
 
 namespace {
-class SCOPED_LOCKABLE ArenaLock {
+class ABSL_SCOPED_LOCKABLE ArenaLock {
  public:
   explicit ArenaLock(LowLevelAlloc::Arena *arena)
-      EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
       : arena_(arena) {
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
     if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
@@ -290,7 +290,7 @@ class SCOPED_LOCKABLE ArenaLock {
     arena_->mu.Lock();
   }
   ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
-  void Leave() UNLOCK_FUNCTION() {
+  void Leave() ABSL_UNLOCK_FUNCTION() {
     arena_->mu.Unlock();
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
     if (mask_valid_) {
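
The ArenaLock pattern above generalizes to any RAII lock wrapper; a minimal hypothetical sketch (using the public absl::Mutex rather than the internal SpinLock, with names not from this commit) shows how ABSL_SCOPED_LOCKABLE, ABSL_EXCLUSIVE_LOCK_FUNCTION, and ABSL_UNLOCK_FUNCTION describe a type that locks in its constructor and must be released via an explicit Leave():

// Hypothetical sketch of the ABSL_SCOPED_LOCKABLE pattern used by ArenaLock.
#include <cassert>
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class ABSL_SCOPED_LOCKABLE ScopedRegion {
 public:
  explicit ScopedRegion(absl::Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    mu_->Lock();
  }
  // As with ArenaLock::Leave(), callers must leave before destruction.
  void Leave() ABSL_UNLOCK_FUNCTION() {
    mu_->Unlock();
    left_ = true;
  }
  ~ScopedRegion() { assert(left_); }  // mirrors ArenaLock's "haven't left" check

 private:
  absl::Mutex* mu_;
  bool left_ = false;
};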
14 changes: 7 additions & 7 deletions absl/base/internal/spinlock.h
@@ -48,7 +48,7 @@
 namespace absl {
 namespace base_internal {
 
-class LOCKABLE SpinLock {
+class ABSL_LOCKABLE SpinLock {
  public:
   SpinLock() : lockword_(kSpinLockCooperative) {
     ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
@@ -79,7 +79,7 @@ class LOCKABLE SpinLock {
   ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
 
   // Acquire this SpinLock.
-  inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
+  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
     if (!TryLockImpl()) {
       SlowLock();
@@ -91,7 +91,7 @@ class LOCKABLE SpinLock {
   // acquisition was successful. If the lock was not acquired, false is
   // returned. If this SpinLock is free at the time of the call, TryLock
   // will return true with high probability.
-  inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+  inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
     ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
     bool res = TryLockImpl();
     ABSL_TSAN_MUTEX_POST_LOCK(
@@ -101,7 +101,7 @@ class LOCKABLE SpinLock {
   }
 
   // Release this SpinLock, which must be held by the calling thread.
-  inline void Unlock() UNLOCK_FUNCTION() {
+  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
     ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
     uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
     lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
@@ -179,13 +179,13 @@ class LOCKABLE SpinLock {
 
 // Corresponding locker object that arranges to acquire a spinlock for
 // the duration of a C++ scope.
-class SCOPED_LOCKABLE SpinLockHolder {
+class ABSL_SCOPED_LOCKABLE SpinLockHolder {
  public:
-  inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
+  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
       : lock_(l) {
     l->Lock();
   }
-  inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }
+  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
 
   SpinLockHolder(const SpinLockHolder&) = delete;
   SpinLockHolder& operator=(const SpinLockHolder&) = delete;
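
Given the declarations above, a hypothetical usage sketch (base_internal is not a public API; this only illustrates the annotated RAII pattern):

// Hypothetical usage of SpinLockHolder; illustrative only.
#include "absl/base/internal/spinlock.h"

namespace {
absl::base_internal::SpinLock g_lock;
int g_counter = 0;  // protected by g_lock

void Bump() {
  absl::base_internal::SpinLockHolder h(&g_lock);  // Lock() in the constructor
  ++g_counter;
}  // ~SpinLockHolder() runs Unlock() here
}  // namespace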
4 changes: 2 additions & 2 deletions absl/container/internal/hashtablez_sampler.h
@@ -65,7 +65,7 @@ struct HashtablezInfo {
 
   // Puts the object into a clean state, fills in the logically `const` members,
   // blocking for any readers that are currently sampling the object.
-  void PrepareForSampling() EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
 
   // These fields are mutated by the various Record* APIs and need to be
   // thread-safe.
@@ -83,7 +83,7 @@ struct HashtablezInfo {
   // prevents races with sampling and resurrecting an object.
   absl::Mutex init_mu;
   HashtablezInfo* next;
-  HashtablezInfo* dead GUARDED_BY(init_mu);
+  HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
 
   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions. They are logically `const` in that sense.
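
A hypothetical call-site sketch for an ABSL_EXCLUSIVE_LOCKS_REQUIRED member like PrepareForSampling (not code from this commit): the caller must hold the named mutex, typically via absl::MutexLock, or clang's -Wthread-safety analysis rejects the call:

// Hypothetical sketch: satisfying ABSL_EXCLUSIVE_LOCKS_REQUIRED at a call site.
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

struct SampleInfo {
  absl::Mutex init_mu;
  int state ABSL_GUARDED_BY(init_mu) = 0;
  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu) { state = 0; }
};

void Resurrect(SampleInfo* info) {
  absl::MutexLock lock(&info->init_mu);  // acquires init_mu for this scope
  info->PrepareForSampling();            // OK: init_mu is held
}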