Skip to content
This repository has been archived by the owner on Apr 22, 2023. It is now read-only.

Commit

Permalink
Browse files Browse the repository at this point in the history
udp_wrap, stream_wrap: lazy init slab allocator
Create slab allocator when binding is initialized.

Add an AtExit handler to destroy the slab before the VM shuts down; it can't be
disposed after V8 is dead, and Valgrind would otherwise complain about memory leaks.
  • Loading branch information
bnoordhuis committed Jun 5, 2012
1 parent cc0e7ef commit 27061cc
Show file tree
Hide file tree
Showing 2 changed files with 29 additions and 11 deletions.
21 changes: 15 additions & 6 deletions src/stream_wrap.cc
Expand Up @@ -76,14 +76,23 @@ static Persistent<String> bytes_sym;
static Persistent<String> write_queue_size_sym;
static Persistent<String> onread_sym;
static Persistent<String> oncomplete_sym;
static SlabAllocator slab_allocator(SLAB_SIZE);
static SlabAllocator* slab_allocator;
static bool initialized;


// AtExit handler: destroy the shared slab allocator before the VM shuts
// down. The slab holds V8 handles, so it cannot be disposed once V8 is
// dead; freeing it here keeps Valgrind from reporting the memory as leaked.
static void DeleteSlabAllocator(void*) {
delete slab_allocator;
slab_allocator = NULL;  // null the pointer to guard against use-after-free
}


void StreamWrap::Initialize(Handle<Object> target) {
if (initialized) return;
initialized = true;

slab_allocator = new SlabAllocator(SLAB_SIZE);
AtExit(DeleteSlabAllocator, NULL);

HandleScope scope;

HandleWrap::Initialize(target);
Expand Down Expand Up @@ -156,7 +165,7 @@ Handle<Value> StreamWrap::ReadStop(const Arguments& args) {
// libuv allocation callback: hand back a read buffer carved out of the
// shared slab. The slab allocator is created lazily in
// StreamWrap::Initialize(), so it is guaranteed to exist by the time any
// stream triggers a read.
uv_buf_t StreamWrap::OnAlloc(uv_handle_t* handle, size_t suggested_size) {
StreamWrap* wrap = static_cast<StreamWrap*>(handle->data);
assert(wrap->stream_ == reinterpret_cast<uv_stream_t*>(handle));
// Stale pre-refactor line removed: `slab_allocator.Allocate(...)` used
// dot-access on what is now a pointer and would not compile.
char* buf = slab_allocator->Allocate(wrap->object_, suggested_size);
return uv_buf_init(buf, suggested_size);
}

Expand All @@ -175,7 +184,7 @@ void StreamWrap::OnReadCommon(uv_stream_t* handle, ssize_t nread,
// If libuv reports an error or EOF it *may* give us a buffer back. In that
// case, return the space to the slab.
if (buf.base != NULL) {
slab_allocator.Shrink(wrap->object_, buf.base, 0);
slab_allocator->Shrink(wrap->object_, buf.base, 0);
}

SetErrno(uv_last_error(uv_default_loop()));
Expand All @@ -184,9 +193,9 @@ void StreamWrap::OnReadCommon(uv_stream_t* handle, ssize_t nread,
}

assert(buf.base != NULL);
Local<Object> slab = slab_allocator.Shrink(wrap->object_,
buf.base,
nread);
Local<Object> slab = slab_allocator->Shrink(wrap->object_,
buf.base,
nread);

if (nread == 0) return;
assert(static_cast<size_t>(nread) <= buf.len);
Expand Down
19 changes: 14 additions & 5 deletions src/udp_wrap.cc
Expand Up @@ -58,7 +58,13 @@ Local<Object> AddressToJS(const sockaddr* addr);
static Persistent<String> buffer_sym;
static Persistent<String> oncomplete_sym;
static Persistent<String> onmessage_sym;
static SlabAllocator slab_allocator(SLAB_SIZE);
static SlabAllocator* slab_allocator;


// AtExit handler: free udp_wrap's shared slab allocator before V8 shuts
// down. It cannot be disposed after V8 is dead, and leaving it allocated
// makes Valgrind flag the slab as a memory leak.
static void DeleteSlabAllocator(void*) {
delete slab_allocator;
slab_allocator = NULL;  // avoid a dangling pointer if anything runs later
}


UDPWrap::UDPWrap(Handle<Object> object): HandleWrap(object,
Expand All @@ -76,6 +82,9 @@ UDPWrap::~UDPWrap() {
void UDPWrap::Initialize(Handle<Object> target) {
HandleWrap::Initialize(target);

slab_allocator = new SlabAllocator(SLAB_SIZE);
AtExit(DeleteSlabAllocator, NULL);

HandleScope scope;

buffer_sym = NODE_PSYMBOL("buffer");
Expand Down Expand Up @@ -352,7 +361,7 @@ void UDPWrap::OnSend(uv_udp_send_t* req, int status) {

// libuv allocation callback for incoming datagrams: carve the receive
// buffer out of the shared slab. The allocator is created lazily in
// UDPWrap::Initialize(), so it exists before any socket can receive.
uv_buf_t UDPWrap::OnAlloc(uv_handle_t* handle, size_t suggested_size) {
UDPWrap* wrap = static_cast<UDPWrap*>(handle->data);
// Stale pre-refactor line removed: `slab_allocator.Allocate(...)` used
// dot-access on what is now a pointer and would not compile.
char* buf = slab_allocator->Allocate(wrap->object_, suggested_size);
return uv_buf_init(buf, suggested_size);
}

Expand All @@ -365,9 +374,9 @@ void UDPWrap::OnRecv(uv_udp_t* handle,
HandleScope scope;

UDPWrap* wrap = reinterpret_cast<UDPWrap*>(handle->data);
Local<Object> slab = slab_allocator.Shrink(wrap->object_,
buf.base,
nread < 0 ? 0 : nread);
Local<Object> slab = slab_allocator->Shrink(wrap->object_,
buf.base,
nread < 0 ? 0 : nread);
if (nread == 0) return;

if (nread < 0) {
Expand Down

0 comments on commit 27061cc

Please sign in to comment.