Merge r183959 - Release assert in com.apple.WebKit.WebContent under JavaScriptCore: JSC::JSONProtoFuncStringify

https://bugs.webkit.org/show_bug.cgi?id=144758

Reviewed by Andreas Kling.

This was an out-of-memory error when trying to shrink a string builder.
bmalloc was missing the optimization that allowed realloc() to shrink
without copying. So, let's add it.
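
(Illustration only, not part of the patch: assuming a build where malloc()/realloc() are routed to bmalloc and the allocation is big enough to take the Large path, a shrinking realloc() should now keep the original address instead of allocating, copying, and freeing. WebKit itself reaches Allocator::reallocate through its FastMalloc layer rather than the libc symbols, and plain C makes no pointer-stability guarantee, so treat this as a behavioral sketch rather than a portable test.)

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    int main()
    {
        // Big enough to bypass the small/medium caches (assumed threshold).
        void* p = std::malloc(512 * 1024);
        std::uintptr_t before = reinterpret_cast<std::uintptr_t>(p);

        // Shrink: before this patch, bmalloc would allocate + memcpy + free.
        void* q = std::realloc(p, 128 * 1024);

        // With shrink-in-place, the object is trimmed and the address is unchanged.
        std::printf("shrunk in place: %s\n",
            reinterpret_cast<std::uintptr_t>(q) == before ? "yes" : "no");

        std::free(q);
        return 0;
    }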

* bmalloc/Allocator.cpp:
(bmalloc::Allocator::reallocate): Added Large and XLarge cases for
shrinking without copying. This isn't possible for small and medium
objects, and probably not very profitable, either.

* bmalloc/Heap.cpp:
(bmalloc::Heap::findXLarge):
(bmalloc::Heap::deallocateXLarge):
* bmalloc/Heap.h: Refactored this code to return a reference to an
XLarge range. This makes the code reusable, and also makes it easier
for realloc() to update metadata.

* bmalloc/LargeObject.h:
(bmalloc::LargeObject::split): Allow allocated objects to split because
that's what realloc() wants to do, and there's nothing intrinsically
wrong with it.
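
(A simplified sketch of the shrink-in-place technique used by the new Large and XLarge cases. The helper names and constants below are placeholders, not bmalloc's actual primitives: the idea is to round the requested size up to the allocator's granularity, keep the front of the object at its original address, and give the tail back, either to the allocator's free lists for Large objects or directly to the OS for XLarge mappings.)

    #include <cstddef>

    struct Range { char* begin; std::size_t size; };

    // Placeholder granularities; bmalloc defines largeAlignment, largeMin and
    // xLargeAlignment for real.
    constexpr std::size_t kLargeAlignment  = 64;
    constexpr std::size_t kLargeMin        = 1024;
    constexpr std::size_t kXLargeAlignment = 4096;

    inline std::size_t roundUpTo(std::size_t alignment, std::size_t size)
    {
        return (size + alignment - 1) & ~(alignment - 1);
    }

    // Stand-ins for returning memory: a split-off tail to the free lists, or
    // whole pages to the OS (bmalloc uses LargeObject::split() and vmDeallocate()).
    inline void freeTail(const Range&) { }
    inline void releasePages(char*, std::size_t) { }

    // Large case: split the allocated object at the new boundary, free the tail,
    // and return the original pointer. No bytes are copied.
    void* shrinkLargeInPlace(Range& object, std::size_t newSize)
    {
        newSize = roundUpTo(kLargeAlignment, newSize);
        if (newSize < object.size && object.size - newSize >= kLargeMin) {
            freeTail(Range { object.begin + newSize, object.size - newSize });
            object.size = newSize;
        }
        return object.begin;
    }

    // XLarge case: trim the mapping itself and shrink the recorded range.
    void* shrinkXLargeInPlace(Range& object, std::size_t newSize)
    {
        newSize = roundUpTo(kXLargeAlignment, newSize);
        if (newSize < object.size && object.size - newSize >= kXLargeAlignment) {
            releasePages(object.begin + newSize, object.size - newSize);
            object.size = newSize;
        }
        return object.begin;
    }

Small and medium objects are left alone; as the notes above say, shrinking them in place isn't possible and probably wouldn't be very profitable anyway.
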
geoffreygaren authored and carlosgcampos committed May 12, 2015
1 parent 62beef3 commit 6cc00f6
Showing 6 changed files with 69 additions and 24 deletions.
28 changes: 28 additions & 0 deletions Source/bmalloc/ChangeLog
@@ -1,3 +1,31 @@
+2015-05-07  Geoffrey Garen  <ggaren@apple.com>
+
+        Release assert in com.apple.WebKit.WebContent under JavaScriptCore: JSC::JSONProtoFuncStringify
+        https://bugs.webkit.org/show_bug.cgi?id=144758
+
+        Reviewed by Andreas Kling.
+
+        This was an out-of-memory error when trying to shrink a string builder.
+        bmalloc was missing the optimization that allowed realloc() to shrink
+        without copying. So, let's add it.
+
+        * bmalloc/Allocator.cpp:
+        (bmalloc::Allocator::reallocate): Added Large and XLarge cases for
+        shrinking without copying. This isn't possible for small and medium
+        objects, and probably not very profitable, either.
+
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::findXLarge):
+        (bmalloc::Heap::deallocateXLarge):
+        * bmalloc/Heap.h: Refactored this code to return a reference to an
+        XLarge range. This makes the code reusable, and also makes it easier
+        for realloc() to update metadata.
+
+        * bmalloc/LargeObject.h:
+        (bmalloc::LargeObject::split): Allow allocated objects to split because
+        that's what realloc() wants to do, and there's nothing intrinsically
+        wrong with it.
+
 2015-03-12  Geoffrey Garen  <ggaren@apple.com>
 
         Assertion failure in bmalloc::LargeObject::validateSelf on Mavericks Debug layout test bot
40 changes: 32 additions & 8 deletions Source/bmalloc/bmalloc/Allocator.cpp
@@ -112,10 +112,6 @@ void* Allocator::reallocate(void* object, size_t newSize)
     if (!m_isBmallocEnabled)
         return realloc(object, newSize);
 
-    void* result = allocate(newSize);
-    if (!object)
-        return result;
-
     size_t oldSize = 0;
     switch (objectType(object)) {
     case Small: {
@@ -129,20 +125,48 @@ void* Allocator::reallocate(void* object, size_t newSize)
         break;
     }
     case Large: {
-        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+        std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
         LargeObject largeObject(object);
         oldSize = largeObject.size();
+
+        if (newSize < oldSize && newSize > mediumMax) {
+            newSize = roundUpToMultipleOf<largeAlignment>(newSize);
+            if (oldSize - newSize >= largeMin) {
+                std::pair<LargeObject, LargeObject> split = largeObject.split(newSize);
+
+                lock.unlock();
+                m_deallocator.deallocate(split.second.begin());
+                lock.lock();
+            }
+            return object;
+        }
         break;
     }
     case XLarge: {
-        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
-        Range range = PerProcess<Heap>::getFastCase()->findXLarge(lock, object);
-        RELEASE_BASSERT(range);
+        BASSERT(objectType(nullptr) == XLarge);
+        if (!object)
+            break;
+
+        std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
+        Range& range = PerProcess<Heap>::getFastCase()->findXLarge(lock, object);
         oldSize = range.size();
+
+        if (newSize < oldSize && newSize > largeMax) {
+            newSize = roundUpToMultipleOf<xLargeAlignment>(newSize);
+            if (oldSize - newSize >= xLargeAlignment) {
+                lock.unlock();
+                vmDeallocate(static_cast<char*>(object) + newSize, oldSize - newSize);
+                lock.lock();
+
+                range = Range(object, newSize);
+            }
+            return object;
+        }
         break;
     }
     }
 
+    void* result = allocate(newSize);
     size_t copySize = std::min(oldSize, newSize);
     memcpy(result, object, copySize);
     m_deallocator.deallocate(object);
1 change: 1 addition & 0 deletions Source/bmalloc/bmalloc/Deallocator.cpp
@@ -99,6 +99,7 @@ void Deallocator::deallocateSlowCase(void* object)
         return;
     }
 
+    BASSERT(objectType(nullptr) == XLarge);
     if (!object)
         return;
 
20 changes: 7 additions & 13 deletions Source/bmalloc/bmalloc/Heap.cpp
@@ -306,31 +306,25 @@ void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, s
     return result;
 }
 
-Range Heap::findXLarge(std::lock_guard<StaticMutex>&, void* object)
+Range& Heap::findXLarge(std::unique_lock<StaticMutex>&, void* object)
 {
     for (auto& range : m_xLargeObjects) {
         if (range.begin() != object)
             continue;
         return range;
     }
 
-    return Range();
+    RELEASE_BASSERT(false);
+    return *static_cast<Range*>(nullptr); // Silence compiler error.
 }
 
 void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
 {
-    for (auto& range : m_xLargeObjects) {
-        if (range.begin() != object)
-            continue;
-
-        Range toDeallocate = m_xLargeObjects.pop(&range);
+    Range toDeallocate = m_xLargeObjects.pop(&findXLarge(lock, object));
 
-        lock.unlock();
-        vmDeallocate(toDeallocate.begin(), toDeallocate.size());
-        lock.lock();
-
-        break;
-    }
+    lock.unlock();
+    vmDeallocate(toDeallocate.begin(), toDeallocate.size());
+    lock.lock();
 }
 
 void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, LargeObject& largeObject, size_t size)
2 changes: 1 addition & 1 deletion Source/bmalloc/bmalloc/Heap.h
@@ -66,7 +66,7 @@ class Heap {
     void* allocateXLarge(std::lock_guard<StaticMutex>&, size_t);
     void* allocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
     void* tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
-    Range findXLarge(std::lock_guard<StaticMutex>&, void*);
+    Range& findXLarge(std::unique_lock<StaticMutex>&, void*);
     void deallocateXLarge(std::unique_lock<StaticMutex>&, void*);
 
     void scavenge(std::unique_lock<StaticMutex>&, std::chrono::milliseconds sleepDuration);
2 changes: 0 additions & 2 deletions Source/bmalloc/bmalloc/LargeObject.h
@@ -206,8 +206,6 @@ inline LargeObject LargeObject::merge() const
 
 inline std::pair<LargeObject, LargeObject> LargeObject::split(size_t size) const
 {
-    BASSERT(isFree());
-
     Range split(begin(), size);
     Range leftover = Range(split.end(), this->size() - size);
     BASSERT(leftover.size() >= largeMin);