Skip to content

Commit

Permalink
src: unify thread pool work
Browse files Browse the repository at this point in the history
Instead of using the libuv mechanism directly, provide an internal
`ThreadPoolWork` wrapper that takes care of increasing/decreasing
the waiting request counter.

PR-URL: #19377
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: James M Snell <jasnell@gmail.com>
  • Loading branch information
addaleax committed May 14, 2018
1 parent 7153bec commit 2347ce8
Show file tree
Hide file tree
Showing 4 changed files with 108 additions and 128 deletions.
73 changes: 28 additions & 45 deletions src/node_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -3338,7 +3338,7 @@ static napi_status ConvertUVErrorCode(int code) {
}

// Wrapper around uv_work_t which calls user-provided callbacks.
class Work : public node::AsyncResource {
class Work : public node::AsyncResource, public node::ThreadPoolWork {
private:
explicit Work(napi_env env,
v8::Local<v8::Object> async_resource,
Expand All @@ -3349,15 +3349,14 @@ class Work : public node::AsyncResource {
: AsyncResource(env->isolate,
async_resource,
*v8::String::Utf8Value(env->isolate, async_resource_name)),
_env(env),
_data(data),
_execute(execute),
_complete(complete) {
memset(&_request, 0, sizeof(_request));
_request.data = this;
ThreadPoolWork(node::Environment::GetCurrent(env->isolate)),
_env(env),
_data(data),
_execute(execute),
_complete(complete) {
}

~Work() { }
virtual ~Work() { }

public:
static Work* New(napi_env env,
Expand All @@ -3374,47 +3373,36 @@ class Work : public node::AsyncResource {
delete work;
}

static void ExecuteCallback(uv_work_t* req) {
Work* work = static_cast<Work*>(req->data);
work->_execute(work->_env, work->_data);
void DoThreadPoolWork() override {
_execute(_env, _data);
}

static void CompleteCallback(uv_work_t* req, int status) {
Work* work = static_cast<Work*>(req->data);
void AfterThreadPoolWork(int status) {
if (_complete == nullptr)
return;

if (work->_complete != nullptr) {
napi_env env = work->_env;
// Establish a handle scope here so that every callback doesn't have to.
// Also it is needed for the exception-handling below.
v8::HandleScope scope(_env->isolate);

// Establish a handle scope here so that every callback doesn't have to.
// Also it is needed for the exception-handling below.
v8::HandleScope scope(env->isolate);
node::Environment* env_ = node::Environment::GetCurrent(env->isolate);
env_->DecreaseWaitingRequestCounter();
CallbackScope callback_scope(this);

CallbackScope callback_scope(work);
NAPI_CALL_INTO_MODULE(_env,
_complete(_env, ConvertUVErrorCode(status), _data),
[this] (v8::Local<v8::Value> local_err) {
// If there was an unhandled exception in the complete callback,
// report it as a fatal exception. (There is no JavaScript on the
// callstack that can possibly handle it.)
v8impl::trigger_fatal_exception(_env, local_err);
});

NAPI_CALL_INTO_MODULE(env,
work->_complete(env, ConvertUVErrorCode(status), work->_data),
[env] (v8::Local<v8::Value> local_err) {
// If there was an unhandled exception in the complete callback,
// report it as a fatal exception. (There is no JavaScript on the
// callstack that can possibly handle it.)
v8impl::trigger_fatal_exception(env, local_err);
});

// Note: Don't access `work` after this point because it was
// likely deleted by the complete callback.
}
}

uv_work_t* Request() {
return &_request;
// Note: Don't access `work` after this point because it was
// likely deleted by the complete callback.
}

private:
napi_env _env;
void* _data;
uv_work_t _request;
napi_async_execute_callback _execute;
napi_async_complete_callback _complete;
};
Expand Down Expand Up @@ -3491,12 +3479,7 @@ napi_status napi_queue_async_work(napi_env env, napi_async_work work) {

uvimpl::Work* w = reinterpret_cast<uvimpl::Work*>(work);

node::Environment* env_ = node::Environment::GetCurrent(env->isolate);
env_->IncreaseWaitingRequestCounter();
CALL_UV(env, uv_queue_work(event_loop,
w->Request(),
uvimpl::Work::ExecuteCallback,
uvimpl::Work::CompleteCallback));
w->ScheduleWork();

return napi_clear_last_error(env);
}
Expand All @@ -3507,7 +3490,7 @@ napi_status napi_cancel_async_work(napi_env env, napi_async_work work) {

uvimpl::Work* w = reinterpret_cast<uvimpl::Work*>(work);

CALL_UV(env, uv_cancel(reinterpret_cast<uv_req_t*>(w->Request())));
CALL_UV(env, w->CancelWork());

return napi_clear_last_error(env);
}
Expand Down
99 changes: 30 additions & 69 deletions src/node_crypto.cc
Original file line number Diff line number Diff line change
Expand Up @@ -4562,7 +4562,7 @@ bool ECDH::IsKeyPairValid() {
}


class PBKDF2Request : public AsyncWrap {
class PBKDF2Request : public AsyncWrap, public ThreadPoolWork {
public:
PBKDF2Request(Environment* env,
Local<Object> object,
Expand All @@ -4572,6 +4572,7 @@ class PBKDF2Request : public AsyncWrap {
int keylen,
int iteration_count)
: AsyncWrap(env, object, AsyncWrap::PROVIDER_PBKDF2REQUEST),
ThreadPoolWork(env),
digest_(digest),
success_(false),
pass_(std::move(pass)),
Expand All @@ -4580,21 +4581,14 @@ class PBKDF2Request : public AsyncWrap {
iteration_count_(iteration_count) {
}

uv_work_t* work_req() {
return &work_req_;
}

size_t self_size() const override { return sizeof(*this); }

static void Work(uv_work_t* work_req);
void Work();
void DoThreadPoolWork() override;
void AfterThreadPoolWork(int status) override;

static void After(uv_work_t* work_req, int status);
void After(Local<Value> (*argv)[2]);
void After();

private:
uv_work_t work_req_;
const EVP_MD* digest_;
bool success_;
MallocedBuffer<char> pass_;
Expand All @@ -4604,7 +4598,7 @@ class PBKDF2Request : public AsyncWrap {
};


void PBKDF2Request::Work() {
void PBKDF2Request::DoThreadPoolWork() {
success_ =
PKCS5_PBKDF2_HMAC(
pass_.data, pass_.size,
Expand All @@ -4617,12 +4611,6 @@ void PBKDF2Request::Work() {
}


void PBKDF2Request::Work(uv_work_t* work_req) {
PBKDF2Request* req = ContainerOf(&PBKDF2Request::work_req_, work_req);
req->Work();
}


void PBKDF2Request::After(Local<Value> (*argv)[2]) {
if (success_) {
(*argv)[0] = Null(env()->isolate());
Expand All @@ -4635,7 +4623,12 @@ void PBKDF2Request::After(Local<Value> (*argv)[2]) {
}


void PBKDF2Request::After() {
void PBKDF2Request::AfterThreadPoolWork(int status) {
std::unique_ptr<PBKDF2Request> req(this);
if (status == UV_ECANCELED)
return;
CHECK_EQ(status, 0);

HandleScope handle_scope(env()->isolate());
Context::Scope context_scope(env()->context());
Local<Value> argv[2];
Expand All @@ -4644,17 +4637,6 @@ void PBKDF2Request::After() {
}


void PBKDF2Request::After(uv_work_t* work_req, int status) {
std::unique_ptr<PBKDF2Request> req(
ContainerOf(&PBKDF2Request::work_req_, work_req));
req->env()->DecreaseWaitingRequestCounter();
if (status == UV_ECANCELED)
return;
CHECK_EQ(status, 0);
req->After();
}


void PBKDF2(const FunctionCallbackInfo<Value>& args) {
Environment* env = Environment::GetCurrent(args);

Expand Down Expand Up @@ -4701,14 +4683,10 @@ void PBKDF2(const FunctionCallbackInfo<Value>& args) {
if (args[5]->IsFunction()) {
obj->Set(env->context(), env->ondone_string(), args[5]).FromJust();

env->IncreaseWaitingRequestCounter();
uv_queue_work(env->event_loop(),
req.release()->work_req(),
PBKDF2Request::Work,
PBKDF2Request::After);
req.release()->ScheduleWork();
} else {
env->PrintSyncTrace();
req->Work();
req->DoThreadPoolWork();
Local<Value> argv[2];
req->After(&argv);

Expand All @@ -4721,7 +4699,7 @@ void PBKDF2(const FunctionCallbackInfo<Value>& args) {


// Only instantiate within a valid HandleScope.
class RandomBytesRequest : public AsyncWrap {
class RandomBytesRequest : public AsyncWrap, public ThreadPoolWork {
public:
enum FreeMode { FREE_DATA, DONT_FREE_DATA };

Expand All @@ -4731,16 +4709,13 @@ class RandomBytesRequest : public AsyncWrap {
char* data,
FreeMode free_mode)
: AsyncWrap(env, object, AsyncWrap::PROVIDER_RANDOMBYTESREQUEST),
ThreadPoolWork(env),
error_(0),
size_(size),
data_(data),
free_mode_(free_mode) {
}

uv_work_t* work_req() {
return &work_req_;
}

inline size_t size() const {
return size_;
}
Expand Down Expand Up @@ -4778,7 +4753,8 @@ class RandomBytesRequest : public AsyncWrap {

size_t self_size() const override { return sizeof(*this); }

uv_work_t work_req_;
void DoThreadPoolWork() override;
void AfterThreadPoolWork(int status) override;

private:
unsigned long error_; // NOLINT(runtime/int)
Expand All @@ -4788,21 +4764,17 @@ class RandomBytesRequest : public AsyncWrap {
};


void RandomBytesWork(uv_work_t* work_req) {
RandomBytesRequest* req =
ContainerOf(&RandomBytesRequest::work_req_, work_req);

void RandomBytesRequest::DoThreadPoolWork() {
// Ensure that OpenSSL's PRNG is properly seeded.
CheckEntropy();

const int r = RAND_bytes(reinterpret_cast<unsigned char*>(req->data()),
req->size());
const int r = RAND_bytes(reinterpret_cast<unsigned char*>(data_), size_);

// RAND_bytes() returns 0 on error.
if (r == 0) {
req->set_error(ERR_get_error()); // NOLINT(runtime/int)
set_error(ERR_get_error()); // NOLINT(runtime/int)
} else if (r == -1) {
req->set_error(static_cast<unsigned long>(-1)); // NOLINT(runtime/int)
set_error(static_cast<unsigned long>(-1)); // NOLINT(runtime/int)
}
}

Expand Down Expand Up @@ -4840,27 +4812,24 @@ void RandomBytesCheck(RandomBytesRequest* req, Local<Value> (*argv)[2]) {
}


void RandomBytesAfter(uv_work_t* work_req, int status) {
std::unique_ptr<RandomBytesRequest> req(
ContainerOf(&RandomBytesRequest::work_req_, work_req));
Environment* env = req->env();
env->DecreaseWaitingRequestCounter();
void RandomBytesRequest::AfterThreadPoolWork(int status) {
std::unique_ptr<RandomBytesRequest> req(this);
if (status == UV_ECANCELED)
return;
CHECK_EQ(status, 0);
HandleScope handle_scope(env->isolate());
Context::Scope context_scope(env->context());
HandleScope handle_scope(env()->isolate());
Context::Scope context_scope(env()->context());
Local<Value> argv[2];
RandomBytesCheck(req.get(), &argv);
req->MakeCallback(env->ondone_string(), arraysize(argv), argv);
RandomBytesCheck(this, &argv);
MakeCallback(env()->ondone_string(), arraysize(argv), argv);
}


void RandomBytesProcessSync(Environment* env,
std::unique_ptr<RandomBytesRequest> req,
Local<Value> (*argv)[2]) {
env->PrintSyncTrace();
RandomBytesWork(req->work_req());
req->DoThreadPoolWork();
RandomBytesCheck(req.get(), argv);

if (!(*argv)[0]->IsNull())
Expand All @@ -4887,11 +4856,7 @@ void RandomBytes(const FunctionCallbackInfo<Value>& args) {
if (args[1]->IsFunction()) {
obj->Set(env->context(), env->ondone_string(), args[1]).FromJust();

env->IncreaseWaitingRequestCounter();
uv_queue_work(env->event_loop(),
req.release()->work_req(),
RandomBytesWork,
RandomBytesAfter);
req.release()->ScheduleWork();
args.GetReturnValue().Set(obj);
} else {
Local<Value> argv[2];
Expand Down Expand Up @@ -4927,11 +4892,7 @@ void RandomBytesBuffer(const FunctionCallbackInfo<Value>& args) {
if (args[3]->IsFunction()) {
obj->Set(env->context(), env->ondone_string(), args[3]).FromJust();

env->IncreaseWaitingRequestCounter();
uv_queue_work(env->event_loop(),
req.release()->work_req(),
RandomBytesWork,
RandomBytesAfter);
req.release()->ScheduleWork();
args.GetReturnValue().Set(obj);
} else {
Local<Value> argv[2];
Expand Down
35 changes: 35 additions & 0 deletions src/node_internals.h
Original file line number Diff line number Diff line change
Expand Up @@ -508,6 +508,41 @@ class InternalCallbackScope {
bool closed_ = false;
};

// Base class for work items that run on libuv's thread pool.
// Wraps a uv_work_t request and takes care of increasing/decreasing the
// Environment's waiting-request counter around each scheduled work item,
// so subclasses only implement the work itself.
class ThreadPoolWork {
 public:
  explicit inline ThreadPoolWork(Environment* env) : env_(env) {}
  // This class has virtual methods and is subclassed polymorphically
  // (Work, PBKDF2Request, RandomBytesRequest), so the destructor must be
  // virtual to make destruction through a base pointer well-defined.
  virtual ~ThreadPoolWork() = default;

  // Queues this work item on the event loop's thread pool.
  inline void ScheduleWork();
  // Attempts to cancel a queued-but-not-started work item; returns a
  // libuv status code.
  inline int CancelWork();

  // Runs on a thread pool thread; must not touch JS state.
  virtual void DoThreadPoolWork() = 0;
  // Runs on the loop thread after DoThreadPoolWork() finishes or the
  // request is canceled; `status` is 0 or UV_ECANCELED.
  virtual void AfterThreadPoolWork(int status) = 0;

 private:
  Environment* env_;
  uv_work_t work_req_;
};

// Queues this work item on the thread pool of the Environment's event
// loop. The waiting-request counter is increased here and decreased in
// the completion callback, right before AfterThreadPoolWork() runs.
void ThreadPoolWork::ScheduleWork() {
  env_->IncreaseWaitingRequestCounter();

  // Captureless lambdas decay to the plain function pointers libuv expects.
  auto execute_cb = [](uv_work_t* req) {
    ThreadPoolWork* self = ContainerOf(&ThreadPoolWork::work_req_, req);
    self->DoThreadPoolWork();
  };
  auto complete_cb = [](uv_work_t* req, int status) {
    ThreadPoolWork* self = ContainerOf(&ThreadPoolWork::work_req_, req);
    self->env_->DecreaseWaitingRequestCounter();
    self->AfterThreadPoolWork(status);
  };

  const int err =
      uv_queue_work(env_->event_loop(), &work_req_, execute_cb, complete_cb);
  CHECK_EQ(err, 0);
}

int ThreadPoolWork::CancelWork() {
return uv_cancel(reinterpret_cast<uv_req_t*>(&work_req_));
}

static inline const char *errno_string(int errorno) {
#define ERRNO_CASE(e) case e: return #e;
switch (errorno) {
Expand Down
Loading

0 comments on commit 2347ce8

Please sign in to comment.