[cleanup] Clean-up CAS loops
Clean-up a couple of CAS loops to avoid loading after a compare_exchange
(which updates the old value), and to loosen the memory ordering to
acquire-release to avoid unnecessary fences.

Change-Id: Ifb8e5e5136f687ca5a71417a5d131a7023add054
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2050390
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66235}
LeszekSwirski authored and Commit Bot committed Feb 12, 2020
1 parent 9094a41 commit 2201516
Showing 4 changed files with 27 additions and 27 deletions.
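A note on the pattern being cleaned up: both std::atomic<T>::compare_exchange_weak and V8's Release_CompareAndSwap report the value they actually found in memory when the exchange fails (the former by rewriting its expected argument, the latter via its return value), so re-loading at the top of the retry loop is redundant work. A minimal before/after sketch of the cleanup using plain std::atomic — the bump_if_even_* functions are illustrative, not code from this commit:

#include <atomic>

// Old shape: the value is re-loaded on every iteration, even though a
// failed CAS has already reported what is currently in memory.
bool bump_if_even_old(std::atomic<int>& v) {
  int old_value;
  do {
    old_value = v.load();  // redundant after the first iteration
    if (old_value % 2 != 0) return false;
  } while (!v.compare_exchange_weak(old_value, old_value + 1));
  return true;
}

// New shape: load once (relaxed); compare_exchange_weak rewrites old_value
// with the observed value on failure, so no explicit reload is needed, and
// acquire-release ordering replaces the seq_cst default.
bool bump_if_even_new(std::atomic<int>& v) {
  int old_value = v.load(std::memory_order_relaxed);
  while (old_value % 2 == 0) {
    if (v.compare_exchange_weak(old_value, old_value + 1,
                                std::memory_order_acq_rel)) {
      return true;
    }
  }
  return false;
}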
9 changes: 5 additions & 4 deletions src/base/atomic-utils.h
@@ -130,13 +130,14 @@ class AsAtomicImpl {
  static bool SetBits(T* addr, T bits, T mask) {
    STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
    DCHECK_EQ(bits & ~mask, static_cast<T>(0));
-    T old_value;
-    T new_value;
+    T old_value = Relaxed_Load(addr);
+    T new_value, old_value_before_cas;
    do {
-      old_value = Relaxed_Load(addr);
      if ((old_value & mask) == bits) return false;
      new_value = (old_value & ~mask) | bits;
-    } while (Release_CompareAndSwap(addr, old_value, new_value) != old_value);
+      old_value_before_cas = old_value;
+      old_value = Release_CompareAndSwap(addr, old_value, new_value);
+    } while (old_value != old_value_before_cas);
    return true;
  }

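For readers unfamiliar with V8's AsAtomic helpers: Release_CompareAndSwap returns the previously stored value rather than a success flag, which is why the new loop needs old_value_before_cas to detect success. A self-contained sketch of the same control flow, with cas_release standing in for the V8 primitive (an assumed stand-in, not the real implementation):

#include <atomic>

// Stand-in for Release_CompareAndSwap: returns the value that was in memory
// before the call, whether or not the exchange happened.
template <typename T>
T cas_release(std::atomic<T>* addr, T expected, T desired) {
  addr->compare_exchange_strong(expected, desired, std::memory_order_release,
                                std::memory_order_relaxed);
  return expected;  // rewritten with the observed value on failure
}

template <typename T>
bool set_bits(std::atomic<T>* addr, T bits, T mask) {
  T old_value = addr->load(std::memory_order_relaxed);
  T new_value, old_value_before_cas;
  do {
    if ((old_value & mask) == bits) return false;  // bits already as desired
    new_value = (old_value & ~mask) | bits;
    old_value_before_cas = old_value;
    // On failure the returned value differs from old_value_before_cas and is
    // exactly the fresh value the next iteration needs.
    old_value = cas_release(addr, old_value, new_value);
  } while (old_value != old_value_before_cas);
  return true;
}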
29 changes: 14 additions & 15 deletions src/heap/spaces.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

+#include <atomic>
#include <list>
#include <map>
#include <memory>
@@ -660,12 +661,11 @@ class MemoryChunk : public BasicMemoryChunk {
    // to another chunk. See the comment to Page::FromAllocationAreaAddress.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
-    intptr_t old_mark = 0;
-    do {
-      old_mark = chunk->high_water_mark_;
-    } while (
-        (new_mark > old_mark) &&
-        !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
+    intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
+    while ((new_mark > old_mark) &&
+           !chunk->high_water_mark_.compare_exchange_weak(
+               old_mark, new_mark, std::memory_order_acq_rel)) {
+    }
  }
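The loop body above is intentionally empty: a failed compare_exchange_weak writes the freshly observed value back into old_mark, so each retry re-evaluates the (new_mark > old_mark) guard against current data and gives up once another thread has advanced the mark far enough. The same loop as a standalone, compilable function:

#include <atomic>
#include <cstdint>

// Advance mark monotonically: update only if new_mark is strictly higher.
void update_high_water_mark(std::atomic<intptr_t>& mark, intptr_t new_mark) {
  intptr_t old_mark = mark.load(std::memory_order_relaxed);
  while ((new_mark > old_mark) &&
         !mark.compare_exchange_weak(old_mark, new_mark,
                                     std::memory_order_acq_rel)) {
    // Empty: the failed CAS refreshed old_mark. The weak form may also fail
    // spuriously, in which case the guard simply runs again.
  }
}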

  static inline void MoveExternalBackingStoreBytes(
@@ -1453,15 +1453,14 @@ class MemoryAllocator {
    // The use of atomic primitives does not guarantee correctness (wrt.
    // desired semantics) by default. The loop here ensures that we update the
    // values only if they did not change in between.
-    Address ptr = kNullAddress;
-    do {
-      ptr = lowest_ever_allocated_;
-    } while ((low < ptr) &&
-             !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
-    do {
-      ptr = highest_ever_allocated_;
-    } while ((high > ptr) &&
-             !highest_ever_allocated_.compare_exchange_weak(ptr, high));
+    Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
+    while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
+                              ptr, low, std::memory_order_acq_rel)) {
+    }
+    ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
+    while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
+                               ptr, high, std::memory_order_acq_rel)) {
+    }
  }
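On the ordering change itself: std::atomic operations default to std::memory_order_seq_cst, which is what the commit message means by "unnecessary fences". A small sketch of the difference for the initial load of a CAS loop; on x86 both compile to the same plain load, while weakly ordered targets such as Arm need an extra acquire instruction for the default:

#include <atomic>
#include <cstddef>

std::atomic<size_t> counter{0};

size_t load_default() {
  // Same as load(std::memory_order_seq_cst): participates in the single
  // global order of all seq_cst operations (e.g. ldar on Arm).
  return counter.load();
}

size_t load_relaxed() {
  // Atomicity only, no ordering: a plain load on mainstream targets. Safe
  // as the first read of a CAS loop, because the compare_exchange itself
  // re-validates the value before anything is committed.
  return counter.load(std::memory_order_relaxed);
}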

  void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
9 changes: 4 additions & 5 deletions src/objects/backing-store.cc
@@ -126,12 +126,12 @@ inline void DebugCheckZero(void* start, size_t byte_length) {

bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
  uint64_t reservation_limit = kAddressSpaceLimit;
+  uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
  while (true) {
-    uint64_t old_count = reserved_address_space_.load();
    if (old_count > reservation_limit) return false;
    if (reservation_limit - old_count < num_bytes) return false;
-    if (reserved_address_space_.compare_exchange_weak(old_count,
-                                                      old_count + num_bytes)) {
+    if (reserved_address_space_.compare_exchange_weak(
+            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
      return true;
    }
  }
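Two details of this loop are worth calling out: the limit test is written as reservation_limit - old_count < num_bytes so it cannot overflow the way old_count + num_bytes > reservation_limit could, and the relaxed initial load is sufficient because compare_exchange_weak re-validates (and refreshes) old_count on every attempt. A standalone sketch of the same shape; kLimit and try_reserve are illustrative names:

#include <atomic>
#include <cstdint>

constexpr uint64_t kLimit = uint64_t{1} << 40;  // assumed limit, for the sketch
std::atomic<uint64_t> reserved{0};

// Atomically reserve num_bytes against kLimit, retrying on contention.
bool try_reserve(uint64_t num_bytes) {
  uint64_t old_count = reserved.load(std::memory_order_relaxed);
  while (true) {
    if (old_count > kLimit) return false;
    // Subtraction avoids overflow in old_count + num_bytes.
    if (kLimit - old_count < num_bytes) return false;
    if (reserved.compare_exchange_weak(old_count, old_count + num_bytes,
                                       std::memory_order_acq_rel)) {
      return true;  // old_count was refreshed by the CAS if we looped
    }
  }
}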
}
Expand Down Expand Up @@ -457,10 +457,9 @@ bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
// permissions for the entire range (to be RW), so the operating system
// should deal with that raciness. We know we succeeded when we can
// compare/swap the old length with the new length.
size_t old_length = 0;
size_t old_length = byte_length_.load(std::memory_order_relaxed);
size_t new_length = 0;
while (true) {
old_length = byte_length_.load(std::memory_order_acquire);
size_t current_pages = old_length / wasm::kWasmPageSize;

    // Check if we have exceeded the supplied maximum.
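The rest of GrowWasmMemoryInPlace is elided by the diff, but the visible change follows the same recipe: hoist the load out of the loop and let the failed CAS refresh old_length. A hedged sketch of just that length-update shape (the real function's page accounting and permission handling are omitted; all names here are illustrative):

#include <atomic>
#include <cstddef>

constexpr size_t kPageSize = size_t{1} << 16;  // wasm pages are 64 KiB

bool grow_length(std::atomic<size_t>& byte_length, size_t delta_pages,
                 size_t max_pages) {
  size_t old_length = byte_length.load(std::memory_order_relaxed);
  while (true) {
    size_t current_pages = old_length / kPageSize;
    if (current_pages > max_pages ||
        delta_pages > max_pages - current_pages) {
      return false;  // would exceed the supplied maximum
    }
    size_t new_length = (current_pages + delta_pages) * kPageSize;
    // On failure old_length now holds the concurrent writer's value.
    if (byte_length.compare_exchange_weak(old_length, new_length,
                                          std::memory_order_acq_rel)) {
      return true;
    }
  }
}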
7 changes: 4 additions & 3 deletions src/wasm/wasm-memory.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

+#include <atomic>
#include <limits>

#include "src/heap/heap-inl.h"
@@ -174,12 +175,12 @@ void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,

bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
  size_t reservation_limit = kAddressSpaceLimit;
+  size_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
  while (true) {
-    size_t old_count = reserved_address_space_.load();
    if (old_count > reservation_limit) return false;
    if (reservation_limit - old_count < num_bytes) return false;
-    if (reserved_address_space_.compare_exchange_weak(old_count,
-                                                      old_count + num_bytes)) {
+    if (reserved_address_space_.compare_exchange_weak(
+            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
      return true;
    }
  }
