Skip to content

Commit

Permalink
Merge r223965 - Unreviewed, rolling out r222945.
Browse files Browse the repository at this point in the history
https://bugs.webkit.org/show_bug.cgi?id=178818

"It made WasmBench crash" (Requested by saamyjoon on #webkit).

Reverted changeset:

"bmalloc mutex should be adaptive"
https://bugs.webkit.org/show_bug.cgi?id=177839
https://trac.webkit.org/changeset/222945
  • Loading branch information
webkit-commit-queue authored and carlosgcampos committed Oct 27, 2017
1 parent a91c483 commit c9be7a2
Show file tree
Hide file tree
Showing 8 changed files with 83 additions and 303 deletions.
13 changes: 13 additions & 0 deletions Source/WTF/ChangeLog
@@ -1,3 +1,16 @@
2017-10-25 Commit Queue <commit-queue@webkit.org>

Unreviewed, rolling out r222945.
https://bugs.webkit.org/show_bug.cgi?id=178818

"It made WasmBench crash" (Requested by saamyjoon on #webkit).

Reverted changeset:

"bmalloc mutex should be adaptive"
https://bugs.webkit.org/show_bug.cgi?id=177839
https://trac.webkit.org/changeset/222945

2017-10-04 Filip Pizlo <fpizlo@apple.com>

bmalloc mutex should be adaptive
Expand Down
4 changes: 0 additions & 4 deletions Source/WTF/wtf/LockAlgorithmInlines.h
Expand Up @@ -33,10 +33,6 @@
// the lock algorithm slow path without recompiling the world. Right now this should be included in two
// places (Lock.cpp and JSCell.cpp).

// NOTE: It's a bug to use any memory order other than seq_cst in this code. The cost of seq_cst
// fencing is negligible on slow paths, so any use of a more relaxed memory model is all risk and no
// reward.

namespace WTF {

template<typename LockType, LockType isHeldBit, LockType hasParkedBit>
Expand Down
15 changes: 4 additions & 11 deletions Source/WTF/wtf/WordLock.cpp
Expand Up @@ -76,10 +76,6 @@ ThreadData* myThreadData()

} // anonymous namespace

// NOTE: It's a bug to use any memory order other than seq_cst in this code. The cost of seq_cst
// fencing is negligible on slow paths, so any use of a more relaxed memory model is all risk and no
// reward.

NEVER_INLINE void WordLockBase::lockSlow()
{
unsigned spinCount = 0;
Expand Down Expand Up @@ -227,8 +223,7 @@ NEVER_INLINE void WordLockBase::unlockSlow()
ASSERT(currentWordValue & isLockedBit);
ASSERT(currentWordValue & isQueueLockedBit);
ThreadData* queueHead = bitwise_cast<ThreadData*>(currentWordValue & ~queueHeadMask);
RELEASE_ASSERT(queueHead);
RELEASE_ASSERT(queueHead->shouldPark); // This would have been set before the thread was enqueued, so it must still be set now.
ASSERT(queueHead);

ThreadData* newQueueHead = queueHead->nextInQueue;
// Either this was the only thread on the queue, in which case we delete the queue, or there
Expand Down Expand Up @@ -261,12 +256,10 @@ NEVER_INLINE void WordLockBase::unlockSlow()
{
std::lock_guard<std::mutex> locker(queueHead->parkingLock);
queueHead->shouldPark = false;
// We have to do the notify while still holding the lock, since otherwise, we could try to
// do it after the queueHead has destructed. It's impossible for queueHead to destruct
// while we hold the lock, since it is either currently in the wait loop or it's before it
// so it has to grab the lock before destructing.
queueHead->parkingCondition.notify_one();
}
// Doesn't matter if we notify_all() or notify_one() here since the only thread that could be
// waiting is queueHead.
queueHead->parkingCondition.notify_one();

// The old queue head can now contend for the lock again. We're done!
}
Expand Down
13 changes: 13 additions & 0 deletions Source/bmalloc/ChangeLog
@@ -1,3 +1,16 @@
2017-10-25 Commit Queue <commit-queue@webkit.org>

Unreviewed, rolling out r222945.
https://bugs.webkit.org/show_bug.cgi?id=178818

"It made WasmBench crash" (Requested by saamyjoon on #webkit).

Reverted changeset:

"bmalloc mutex should be adaptive"
https://bugs.webkit.org/show_bug.cgi?id=177839
https://trac.webkit.org/changeset/222945

2017-10-23 Zan Dobersek <zdobersek@igalia.com>

bmalloc::api::tryLargeMemalignVirtual() shouldn't assert on a failed allocation
Expand Down
21 changes: 1 addition & 20 deletions Source/bmalloc/bmalloc/Algorithm.h
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2014-2017 Apple Inc. All rights reserved.
* Copyright (C) 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
Expand Down Expand Up @@ -28,7 +28,6 @@

#include "BAssert.h"
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstddef>
#include <limits>
Expand Down Expand Up @@ -159,24 +158,6 @@ inline constexpr unsigned long log2(unsigned long value)
return bitCount<unsigned long>() - 1 - __builtin_clzl(value);
}

// We need a CAS API that isn't the badly designed one from C++.
// Weak CAS wrapper that leaves the caller's "expected" value untouched. The
// standard compare_exchange_weak writes the observed value back through its
// first argument; here "expected" is taken by value, so the caller's copy is
// never mutated and a retry loop can simply pass the same expected value again.
// Like the underlying primitive, this may fail spuriously, so call it in a loop.
// The failure ordering is pinned to relaxed: a failed CAS is always followed by
// a retry (or a bail-out), so no fence is needed on that path.
template<typename T>
bool compareExchangeWeak(std::atomic<T>& word, T expected, T desired, std::memory_order order = std::memory_order_seq_cst)
{
    T observed = expected;
    return word.compare_exchange_weak(observed, desired, order, std::memory_order_relaxed);
}

// Strong CAS wrapper mirroring compareExchangeWeak: "expected" is passed by
// value so the caller's variable is not overwritten on failure, and the failure
// memory ordering is always relaxed. Unlike the weak form, this never fails
// spuriously — a false return means the stored value really differed from
// "expected".
template<typename T>
bool compareExchangeStrong(std::atomic<T>& word, T expected, T desired, std::memory_order order = std::memory_order_seq_cst)
{
    T observed = expected;
    return word.compare_exchange_strong(observed, desired, order, std::memory_order_relaxed);
}

} // namespace bmalloc

#endif // Algorithm_h
8 changes: 5 additions & 3 deletions Source/bmalloc/bmalloc/PerThread.h
Expand Up @@ -60,11 +60,11 @@ class PerThread {
static void destructor(void*);
};

#if HAVE_PTHREAD_MACHDEP_H

class Cache;
template<typename T> struct PerThreadStorage;

#if HAVE_PTHREAD_MACHDEP_H

// For now, we only support PerThread<PerHeapKind<Cache>>. We can expand to other types by
// using more keys.
template<> struct PerThreadStorage<PerHeapKind<Cache>> {
Expand All @@ -82,7 +82,7 @@ template<> struct PerThreadStorage<PerHeapKind<Cache>> {
}
};

#endif
#else

template<typename T> struct PerThreadStorage {
static bool s_didInitialize;
Expand Down Expand Up @@ -112,6 +112,8 @@ template<typename T> bool PerThreadStorage<T>::s_didInitialize;
template<typename T> pthread_key_t PerThreadStorage<T>::s_key;
template<typename T> std::once_flag PerThreadStorage<T>::s_onceFlag;

#endif

template<typename T>
BINLINE T* PerThread<T>::getFastCase()
{
Expand Down

0 comments on commit c9be7a2

Please sign in to comment.