Skip to content
Permalink
Browse files
[JSC] Wire memory allocation reporting to ArrayBuffer resize / grow
https://bugs.webkit.org/show_bug.cgi?id=248369
rdar://102685157

Reviewed by Ross Kirsling.

Call vm.heap.reportExtraMemoryAllocated when an ArrayBuffer resize / grow operation extends its allocated memory, so the GC accounts for the newly committed bytes.

* Source/JavaScriptCore/runtime/ArrayBuffer.cpp:
(JSC::ArrayBuffer::grow):
(JSC::ArrayBuffer::resize):
(JSC::SharedArrayBufferContents::grow):
* Source/JavaScriptCore/runtime/ArrayBuffer.h:

Canonical link: https://commits.webkit.org/257042@main
  • Loading branch information
Constellation committed Nov 27, 2022
1 parent fd9594c commit 9ce25c632ccd854836b77fe74612cfa097574932
Show file tree
Hide file tree
Showing 3 changed files with 93 additions and 81 deletions.
@@ -431,99 +431,110 @@ void ArrayBuffer::notifyDetaching(VM& vm)
m_detachingWatchpointSet.fireAll(vm, "Array buffer was detached");
}

// Grows a growable SharedArrayBuffer-backed ArrayBuffer to newByteLength.
// Returns the byte-length delta on success (0 if the size is unchanged) so
// callers can see how much was added, or a GrowFailReason on failure.
// On a positive delta, the extra memory is reported to the GC so allocation
// pressure reflects the newly committed bytes.
Expected<int64_t, GrowFailReason> ArrayBuffer::grow(VM& vm, size_t newByteLength)
{
    auto shared = m_contents.m_shared;
    // Only shared (growable) contents support grow(); resizable non-shared
    // buffers go through resize() instead.
    if (UNLIKELY(!shared))
        return makeUnexpected(GrowFailReason::GrowSharedUnavailable);
    auto result = shared->grow(vm, newByteLength);
    if (result && result.value() > 0)
        vm.heap.reportExtraMemoryAllocated(result.value());
    return result;
}

// Resizes a resizable (non-shared) ArrayBuffer to newByteLength, which may be
// smaller or larger than the current size but must not exceed m_maxByteLength.
// Returns the signed byte-length delta on success (0 if unchanged), or a
// GrowFailReason on failure. Physical pages are committed/decommitted as the
// page-rounded size crosses page boundaries; newly exposed bytes are zeroed.
// A positive delta is reported to the GC outside the memory-handle lock.
Expected<int64_t, GrowFailReason> ArrayBuffer::resize(VM& vm, size_t newByteLength)
{
    auto memoryHandle = m_contents.m_memoryHandle;
    // resize() applies only to resizable, non-shared buffers; shared buffers
    // must use grow().
    if (UNLIKELY(!memoryHandle || m_contents.m_shared))
        return makeUnexpected(GrowFailReason::GrowSharedUnavailable);

    int64_t deltaByteLength = 0;
    {
        Locker { memoryHandle->lock() };

        // Keep in mind that newByteLength may not be page-size-aligned.
        if (m_contents.m_maxByteLength < newByteLength)
            return makeUnexpected(GrowFailReason::InvalidGrowSize);

        deltaByteLength = static_cast<int64_t>(newByteLength) - static_cast<int64_t>(m_contents.m_sizeInBytes);
        if (!deltaByteLength)
            return 0;

        auto newPageCount = PageCount::fromBytesWithRoundUp(newByteLength);
        auto oldPageCount = PageCount::fromBytes(memoryHandle->size()); // MemoryHandle's size is always page-size aligned.
        if (newPageCount.bytes() > MAX_ARRAY_BUFFER_SIZE)
            return makeUnexpected(GrowFailReason::WouldExceedMaximum);

        // Only touch page protections / physical accounting when the
        // page-rounded footprint actually changes.
        if (newPageCount != oldPageCount) {
            ASSERT(memoryHandle->maximum() >= newPageCount);
            size_t desiredSize = newPageCount.bytes();
            RELEASE_ASSERT(desiredSize <= MAX_ARRAY_BUFFER_SIZE);

            if (desiredSize > memoryHandle->size()) {
                // Growing: account for the new physical bytes, then make the
                // already-reserved virtual range read+write.
                size_t bytesToAdd = desiredSize - memoryHandle->size();
                ASSERT(bytesToAdd);
                ASSERT(roundUpToMultipleOf<PageCount::pageSize>(bytesToAdd) == bytesToAdd);
                bool allocationSuccess = tryAllocate(&vm,
                    [&] () -> BufferMemoryResult::Kind {
                        return BufferMemoryManager::singleton().tryAllocatePhysicalBytes(bytesToAdd);
                    });
                if (!allocationSuccess)
                    return makeUnexpected(GrowFailReason::OutOfMemory);

                void* memory = memoryHandle->memory();
                RELEASE_ASSERT(memory);

                // Signaling memory must have been pre-allocated virtually.
                uint8_t* startAddress = static_cast<uint8_t*>(memory) + memoryHandle->size();

                dataLogLnIf(ArrayBufferInternal::verbose, "Marking memory's ", RawPointer(memory), " as read+write in range [", RawPointer(startAddress), ", ", RawPointer(startAddress + bytesToAdd), ")");
                constexpr bool readable = true;
                constexpr bool writable = true;
                if (!OSAllocator::protect(startAddress, bytesToAdd, readable, writable)) {
#if OS(WINDOWS)
                    dataLogLn("mprotect failed: ", static_cast<int>(GetLastError()));
#else
                    dataLogLn("mprotect failed: ", safeStrerror(errno).data());
#endif
                    RELEASE_ASSERT_NOT_REACHED();
                }
            } else {
                // Shrinking: release the physical accounting and revoke
                // access to the now-unused tail pages.
                size_t bytesToSubtract = memoryHandle->size() - desiredSize;
                ASSERT(bytesToSubtract);
                ASSERT(roundUpToMultipleOf<PageCount::pageSize>(bytesToSubtract) == bytesToSubtract);
                BufferMemoryManager::singleton().freePhysicalBytes(bytesToSubtract);

                void* memory = memoryHandle->memory();
                RELEASE_ASSERT(memory);

                // Signaling memory must have been pre-allocated virtually.
                uint8_t* startAddress = static_cast<uint8_t*>(memory) + desiredSize;

                dataLogLnIf(ArrayBufferInternal::verbose, "Marking memory's ", RawPointer(memory), " as none in range [", RawPointer(startAddress), ", ", RawPointer(startAddress + bytesToSubtract), ")");
                constexpr bool readable = false;
                constexpr bool writable = false;
                if (!OSAllocator::protect(startAddress, bytesToSubtract, readable, writable)) {
#if OS(WINDOWS)
                    dataLogLn("mprotect failed: ", static_cast<int>(GetLastError()));
#else
                    dataLogLn("mprotect failed: ", safeStrerror(errno).data());
#endif
                    RELEASE_ASSERT_NOT_REACHED();
                }
            }
            memoryHandle->updateSize(desiredSize);
        }

        // Bytes between the old and new logical length must read as zero.
        if (m_contents.m_sizeInBytes < newByteLength)
            memset(bitwise_cast<uint8_t*>(data()) + m_contents.m_sizeInBytes, 0, newByteLength - m_contents.m_sizeInBytes);

        m_contents.m_sizeInBytes = newByteLength;
    }

    // Report outside the lock; reportExtraMemoryAllocated may trigger GC work.
    if (deltaByteLength > 0)
        vm.heap.reportExtraMemoryAllocated(deltaByteLength);

    return deltaByteLength;
}

RefPtr<ArrayBuffer> ArrayBuffer::tryCreateShared(VM& vm, size_t numElements, unsigned elementByteSize, size_t maxByteLength)
@@ -541,23 +552,24 @@ RefPtr<ArrayBuffer> ArrayBuffer::tryCreateShared(VM& vm, size_t numElements, uns
return createShared(SharedArrayBufferContents::create(memory, sizeInBytes.value(), maxByteLength, WTFMove(handle), nullptr, SharedArrayBufferContents::Mode::Default));
}

// Public entry point for growing shared contents: validates that this buffer
// is growable (has a max byte length), then takes the memory-handle lock and
// delegates to the locked overload. Returns the byte-length delta on success
// or a GrowFailReason on failure.
Expected<int64_t, GrowFailReason> SharedArrayBufferContents::grow(VM& vm, size_t newByteLength)
{
    if (!m_hasMaxByteLength)
        return makeUnexpected(GrowFailReason::GrowSharedUnavailable);
    ASSERT(m_memoryHandle);
    return grow(Locker { m_memoryHandle->lock() }, vm, newByteLength);
}

Expected<void, GrowFailReason> SharedArrayBufferContents::grow(const AbstractLocker&, VM& vm, size_t newByteLength)
Expected<int64_t, GrowFailReason> SharedArrayBufferContents::grow(const AbstractLocker&, VM& vm, size_t newByteLength)
{
// Keep in mind that newByteLength may not be page-size-aligned.
size_t sizeInBytes = m_sizeInBytes.load(std::memory_order_seq_cst);
if (sizeInBytes > newByteLength || m_maxByteLength < newByteLength)
return makeUnexpected(GrowFailReason::InvalidGrowSize);

if (sizeInBytes == newByteLength)
return { };
int64_t deltaByteLength = newByteLength - sizeInBytes;
if (!deltaByteLength)
return 0;

auto newPageCount = PageCount::fromBytesWithRoundUp(newByteLength);
auto oldPageCount = PageCount::fromBytes(m_memoryHandle->size()); // MemoryHandle's size is always page-size aligned.
@@ -603,7 +615,7 @@ Expected<void, GrowFailReason> SharedArrayBufferContents::grow(const AbstractLoc
memset(bitwise_cast<uint8_t*>(data()) + sizeInBytes, 0, newByteLength - sizeInBytes);

updateSize(newByteLength);
return { };
return deltaByteLength;
}

ASCIILiteral errorMesasgeForTransfer(ArrayBuffer* buffer)
@@ -84,8 +84,8 @@ class SharedArrayBufferContents final : public ThreadSafeRefCounted<SharedArrayB

Mode mode() const { return m_mode; }

Expected<void, GrowFailReason> grow(VM&, size_t newByteLength);
Expected<void, GrowFailReason> grow(const AbstractLocker&, VM&, size_t newByteLength);
Expected<int64_t, GrowFailReason> grow(VM&, size_t newByteLength);
Expected<int64_t, GrowFailReason> grow(const AbstractLocker&, VM&, size_t newByteLength);

void updateSize(size_t sizeInBytes, std::memory_order order = std::memory_order_seq_cst)
{
@@ -318,8 +318,8 @@ class ArrayBuffer final : public GCIncomingRefCounted<ArrayBuffer> {

JS_EXPORT_PRIVATE static Ref<SharedTask<void(void*)>> primitiveGigacageDestructor();

Expected<void, GrowFailReason> grow(VM&, size_t newByteLength);
Expected<void, GrowFailReason> resize(VM&, size_t newByteLength);
Expected<int64_t, GrowFailReason> grow(VM&, size_t newByteLength);
Expected<int64_t, GrowFailReason> resize(VM&, size_t newByteLength);

private:
static Ref<ArrayBuffer> create(size_t numElements, unsigned elementByteSize, ArrayBufferContents::InitializationPolicy);
@@ -271,7 +271,7 @@ Expected<PageCount, GrowFailReason> Memory::growShared(VM& vm, PageCount delta)

PageCount oldPageCount;
PageCount newPageCount;
Expected<void, GrowFailReason> result;
Expected<int64_t, GrowFailReason> result;
{
std::optional<Locker<Lock>> locker;
// m_shared may not be exist, if this is zero byte memory with zero byte maximum size.

0 comments on commit 9ce25c6

Please sign in to comment.