
udp_wrap, stream_wrap: lazy init slab allocator

Create slab allocator when binding is initialized.

Add an AtExit handler that destroys the slab allocator before the VM shuts down;
it can't be disposed of once V8 is dead, and Valgrind would otherwise complain
about memory leaks.
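The change boils down to one pattern: allocate a process-lifetime object lazily, the first time the binding is initialized, and register a teardown hook that runs while the runtime is still alive so the object can be freed cleanly. A minimal standalone sketch of that pattern is below; it uses the C library's atexit() in place of node's AtExit(cb, arg) and a placeholder Allocator class in place of SlabAllocator, so the names are illustrative rather than node's API.

#include <cstdio>
#include <cstdlib>

// Stand-in for node's SlabAllocator; the real class hands out slices
// of a large V8-backed buffer.
class Allocator {
 public:
  explicit Allocator(size_t size) : size_(size) {
    std::printf("allocated slab of %zu bytes\n", size_);
  }
  ~Allocator() {
    std::printf("slab freed\n");
  }
 private:
  size_t size_;
};

static Allocator* allocator;  // lazily created, no longer a static instance
static bool initialized;

// Teardown hook: runs at process exit, before the runtime is torn down,
// so the allocator can be disposed of and leak checkers stay quiet.
static void DeleteAllocator() {
  delete allocator;
  allocator = NULL;
}

static void Initialize() {
  if (initialized) return;  // only the first call does the work
  initialized = true;
  allocator = new Allocator(1024 * 1024);
  std::atexit(DeleteAllocator);  // analogue of node's AtExit(cb, arg)
}

int main() {
  Initialize();
  Initialize();  // idempotent; the second call is a no-op
  return 0;
}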
1 parent cc0e7ef commit 27061cc9f45afbc4ddc1efa8bed1ea22df7cb0f4 @bnoordhuis committed Jun 5, 2012
Showing with 29 additions and 11 deletions.
  1. +15 −6 src/stream_wrap.cc
  2. +14 −5 src/udp_wrap.cc
@@ -76,14 +76,23 @@ static Persistent<String> bytes_sym;
static Persistent<String> write_queue_size_sym;
static Persistent<String> onread_sym;
static Persistent<String> oncomplete_sym;
-static SlabAllocator slab_allocator(SLAB_SIZE);
+static SlabAllocator* slab_allocator;
static bool initialized;
+static void DeleteSlabAllocator(void*) {
+ delete slab_allocator;
+ slab_allocator = NULL;
+}
+
+
void StreamWrap::Initialize(Handle<Object> target) {
if (initialized) return;
initialized = true;
+ slab_allocator = new SlabAllocator(SLAB_SIZE);
+ AtExit(DeleteSlabAllocator, NULL);
+
HandleScope scope;
HandleWrap::Initialize(target);
@@ -156,7 +165,7 @@ Handle<Value> StreamWrap::ReadStop(const Arguments& args) {
uv_buf_t StreamWrap::OnAlloc(uv_handle_t* handle, size_t suggested_size) {
StreamWrap* wrap = static_cast<StreamWrap*>(handle->data);
assert(wrap->stream_ == reinterpret_cast<uv_stream_t*>(handle));
- char* buf = slab_allocator.Allocate(wrap->object_, suggested_size);
+ char* buf = slab_allocator->Allocate(wrap->object_, suggested_size);
return uv_buf_init(buf, suggested_size);
}
@@ -175,7 +184,7 @@ void StreamWrap::OnReadCommon(uv_stream_t* handle, ssize_t nread,
// If libuv reports an error or EOF it *may* give us a buffer back. In that
// case, return the space to the slab.
if (buf.base != NULL) {
- slab_allocator.Shrink(wrap->object_, buf.base, 0);
+ slab_allocator->Shrink(wrap->object_, buf.base, 0);
}
SetErrno(uv_last_error(uv_default_loop()));
@@ -184,9 +193,9 @@ void StreamWrap::OnReadCommon(uv_stream_t* handle, ssize_t nread,
}
assert(buf.base != NULL);
- Local<Object> slab = slab_allocator.Shrink(wrap->object_,
- buf.base,
- nread);
+ Local<Object> slab = slab_allocator->Shrink(wrap->object_,
+ buf.base,
+ nread);
if (nread == 0) return;
assert(static_cast<size_t>(nread) <= buf.len);
@@ -58,7 +58,13 @@ Local<Object> AddressToJS(const sockaddr* addr);
static Persistent<String> buffer_sym;
static Persistent<String> oncomplete_sym;
static Persistent<String> onmessage_sym;
-static SlabAllocator slab_allocator(SLAB_SIZE);
+static SlabAllocator* slab_allocator;
+
+
+static void DeleteSlabAllocator(void*) {
+ delete slab_allocator;
+ slab_allocator = NULL;
+}
UDPWrap::UDPWrap(Handle<Object> object): HandleWrap(object,
@@ -76,6 +82,9 @@ UDPWrap::~UDPWrap() {
void UDPWrap::Initialize(Handle<Object> target) {
HandleWrap::Initialize(target);
+ slab_allocator = new SlabAllocator(SLAB_SIZE);
+ AtExit(DeleteSlabAllocator, NULL);
+
HandleScope scope;
buffer_sym = NODE_PSYMBOL("buffer");
@@ -352,7 +361,7 @@ void UDPWrap::OnSend(uv_udp_send_t* req, int status) {
uv_buf_t UDPWrap::OnAlloc(uv_handle_t* handle, size_t suggested_size) {
UDPWrap* wrap = static_cast<UDPWrap*>(handle->data);
- char* buf = slab_allocator.Allocate(wrap->object_, suggested_size);
+ char* buf = slab_allocator->Allocate(wrap->object_, suggested_size);
return uv_buf_init(buf, suggested_size);
}
@@ -365,9 +374,9 @@ void UDPWrap::OnRecv(uv_udp_t* handle,
HandleScope scope;
UDPWrap* wrap = reinterpret_cast<UDPWrap*>(handle->data);
- Local<Object> slab = slab_allocator.Shrink(wrap->object_,
- buf.base,
- nread < 0 ? 0 : nread);
+ Local<Object> slab = slab_allocator->Shrink(wrap->object_,
+ buf.base,
+ nread < 0 ? 0 : nread);
if (nread == 0) return;
if (nread < 0) {
