Permalink
Browse files

unix: add uv_cancel()

  • (Branch information did not load.)
1 parent a385ae4 commit 52c8a8617de020a14b87477a15ddaf107ff34445 @bnoordhuis bnoordhuis committed Nov 26, 2012
Showing with 331 additions and 9 deletions.
  1. +1 −1 include/uv-private/uv-unix.h
  2. +13 −0 include/uv.h
  3. +7 −2 src/unix/fs.c
  4. +8 −1 src/unix/getaddrinfo.c
  5. +1 −1 src/unix/internal.h
  6. +63 −4 src/unix/threadpool.c
  7. +5 −0 src/win/threadpool.c
  8. +6 −0 test/test-list.h
  9. +226 −0 test/test-threadpool-cancel.c
  10. +1 −0 uv.gyp
@@ -60,7 +60,7 @@ struct uv__io_s {
/* Thread pool work item. This hunk widens the done callback with a status
 * argument so a cancelled request can be told apart from a completed one:
 * uv__work_cancel() invokes done(w, -UV_ECANCELED), while the normal
 * completion path in uv__work_done() invokes done(w, 0). */
struct uv__work {
void (*work)(struct uv__work *w);
- void (*done)(struct uv__work *w);
+ void (*done)(struct uv__work *w, int status);
struct uv_loop_s* loop;
ngx_queue_t wq;
};
View
@@ -1386,6 +1386,19 @@ struct uv_work_s {
UV_EXTERN int uv_queue_work(uv_loop_t* loop, uv_work_t* req,
uv_work_cb work_cb, uv_after_work_cb after_work_cb);
+/* Cancel a pending request. Fails if the request is executing or has finished
+ * executing.
+ *
+ * Returns 0 on success, -1 on error. The loop error code is not touched.
+ *
+ * Only cancellation of uv_fs_t, uv_getaddrinfo_t and uv_work_t requests is
+ * currently supported.
+ *
+ * This function is currently only implemented on UNIX platforms. On Windows,
+ * it always returns -1.
+ */
+UV_EXTERN int uv_cancel(uv_req_t* req);
+
struct uv_cpu_info_s {
char* model;
View
@@ -90,7 +90,7 @@
} \
else { \
uv__fs_work(&(req)->work_req); \
- uv__fs_done(&(req)->work_req); \
+ uv__fs_done(&(req)->work_req, 0); \
return (req)->result; \
} \
} \
@@ -516,12 +516,17 @@ static void uv__fs_work(struct uv__work* w) {
}
-static void uv__fs_done(struct uv__work* w) {
+static void uv__fs_done(struct uv__work* w, int status) {
uv_fs_t* req;
req = container_of(w, uv_fs_t, work_req);
uv__req_unregister(req->loop, req);
+ if (status != 0) {
+ uv_fs_req_cleanup(req);
+ return;
+ }
+
if (req->errorno != 0) {
req->errorno = uv_translate_sys_error(req->errorno);
uv__set_artificial_error(req->loop, req->errorno);
View
@@ -37,7 +37,7 @@ static void uv__getaddrinfo_work(struct uv__work* w) {
}
-static void uv__getaddrinfo_done(struct uv__work* w) {
+static void uv__getaddrinfo_done(struct uv__work* w, int status) {
uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req);
struct addrinfo *res = req->res;
#if __sun
@@ -63,6 +63,13 @@ static void uv__getaddrinfo_done(struct uv__work* w) {
else
assert(0);
+ req->hints = NULL;
+ req->service = NULL;
+ req->hostname = NULL;
+
+ if (status != 0)
+ return;
+
if (req->retcode == 0) {
/* OK */
#if EAI_NODATA /* FreeBSD deprecated EAI_NODATA */
View
@@ -174,7 +174,7 @@ void uv__signal_loop_cleanup();
void uv__work_submit(uv_loop_t* loop,
struct uv__work *w,
void (*work)(struct uv__work *w),
- void (*done)(struct uv__work *w));
+ void (*done)(struct uv__work *w, int status));
void uv__work_done(uv_async_t* handle, int status);
/* platform specific */
View
@@ -30,6 +30,9 @@ static ngx_queue_t wq;
static volatile int initialized;
+/* To avoid deadlock with uv_cancel() it's crucial that the worker
+ * never holds the global mutex and the loop-local mutex at the same time.
+ */
static void worker(void* arg) {
struct uv__work* w;
ngx_queue_t* q;
@@ -46,8 +49,11 @@ static void worker(void* arg) {
if (q == &exit_message)
uv_cond_signal(&cond);
- else
+ else {
ngx_queue_remove(q);
+ ngx_queue_init(q); /* Signal uv_cancel() that the work req is
+ executing. */
+ }
uv_mutex_unlock(&mutex);
@@ -58,6 +64,8 @@ static void worker(void* arg) {
w->work(w);
uv_mutex_lock(&w->loop->wq_mutex);
+ w->work = NULL; /* Signal uv_cancel() that the work req is done
+ executing. */
ngx_queue_insert_tail(&w->loop->wq, &w->wq);
uv_async_send(&w->loop->wq_async);
uv_mutex_unlock(&w->loop->wq_mutex);
@@ -116,7 +124,7 @@ static void cleanup(void) {
void uv__work_submit(uv_loop_t* loop,
struct uv__work* w,
void (*work)(struct uv__work* w),
- void (*done)(struct uv__work* w)) {
+ void (*done)(struct uv__work* w, int status)) {
uv_once(&once, init_once);
w->loop = loop;
w->work = work;
@@ -125,6 +133,29 @@ void uv__work_submit(uv_loop_t* loop,
}
/* Try to cancel a work request that is still waiting in the thread pool
 * queue. Returns 0 if the request was dequeued and its done callback was
 * invoked with -UV_ECANCELED; returns -1 if the request is already
 * executing or has finished (too late to cancel).
 *
 * The loop and req parameters are unused here: only w is consulted
 * (the loop is reached through w->loop).
 *
 * Lock order: the global pool mutex first, then the loop's wq_mutex.
 * Per the note above worker(), a worker never holds both at once, which
 * is what keeps this nesting deadlock-free. Holding both makes the
 * queued/executing/done test below race-free: the worker re-inits w->wq
 * (making it "empty") when it dequeues the item, and clears w->work when
 * the item finishes. */
+int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
+ int cancelled;
+
+ uv_mutex_lock(&mutex);
+ uv_mutex_lock(&w->loop->wq_mutex);
+
+ /* Cancellable only while still queued (wq non-empty) and not yet
+    picked up by a worker (work != NULL). */
+ cancelled = !ngx_queue_empty(&w->wq) && w->work != NULL;
+ if (cancelled)
+ ngx_queue_remove(&w->wq);
+
+ uv_mutex_unlock(&w->loop->wq_mutex);
+ uv_mutex_unlock(&mutex);
+
+ if (!cancelled)
+ return -1;
+
+ /* Detach the node cleanly, then deliver cancellation to the request's
+    completion path (done callbacks treat nonzero status as "cancelled"). */
+ ngx_queue_init(&w->wq);
+ w->done(w, -UV_ECANCELED);
+
+ return 0;
+}
+
+
void uv__work_done(uv_async_t* handle, int status) {
struct uv__work* w;
uv_loop_t* loop;
@@ -146,7 +177,7 @@ void uv__work_done(uv_async_t* handle, int status) {
ngx_queue_remove(q);
w = container_of(q, struct uv__work, wq);
- w->done(w);
+ w->done(w, 0);
}
}
@@ -158,11 +189,14 @@ static void uv__queue_work(struct uv__work* w) {
}
-static void uv__queue_done(struct uv__work* w) {
+static void uv__queue_done(struct uv__work* w, int status) {
uv_work_t* req = container_of(w, uv_work_t, work_req);
uv__req_unregister(req->loop, req);
+ if (status != 0)
+ return;
+
if (req->after_work_cb)
req->after_work_cb(req);
}
@@ -182,3 +216,28 @@ int uv_queue_work(uv_loop_t* loop,
uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done);
return 0;
}
+
+
+int uv_cancel(uv_req_t* req) {
+ struct uv__work* wreq;
+ uv_loop_t* loop;
+
+ switch (req->type) {
+ case UV_FS:
+ loop = ((uv_fs_t*) req)->loop;
+ wreq = &((uv_fs_t*) req)->work_req;
+ break;
+ case UV_GETADDRINFO:
+ loop = ((uv_getaddrinfo_t*) req)->loop;
+ wreq = &((uv_getaddrinfo_t*) req)->work_req;
+ break;
+ case UV_WORK:
+ loop = ((uv_work_t*) req)->loop;
+ wreq = &((uv_work_t*) req)->work_req;
+ break;
+ default:
+ return -1;
+ }
+
+ return uv__work_cancel(loop, req, wreq);
+}
View
@@ -70,6 +70,11 @@ int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb,
}
+int uv_cancel(uv_req_t* req) {
+ return -1;
+}
+
+
void uv_process_work_req(uv_loop_t* loop, uv_work_t* req) {
uv__req_unregister(loop, req);
if(req->after_work_cb)
View
@@ -187,6 +187,9 @@ TEST_DECLARE (fs_rename_to_existing_file)
TEST_DECLARE (threadpool_queue_work_simple)
TEST_DECLARE (threadpool_queue_work_einval)
TEST_DECLARE (threadpool_multiple_event_loops)
+TEST_DECLARE (threadpool_cancel_getaddrinfo)
+TEST_DECLARE (threadpool_cancel_work)
+TEST_DECLARE (threadpool_cancel_fs)
TEST_DECLARE (thread_mutex)
TEST_DECLARE (thread_rwlock)
TEST_DECLARE (thread_create)
@@ -454,6 +457,9 @@ TASK_LIST_START
TEST_ENTRY (threadpool_queue_work_simple)
TEST_ENTRY (threadpool_queue_work_einval)
TEST_ENTRY (threadpool_multiple_event_loops)
+ TEST_ENTRY (threadpool_cancel_getaddrinfo)
+ TEST_ENTRY (threadpool_cancel_work)
+ TEST_ENTRY (threadpool_cancel_fs)
TEST_ENTRY (thread_mutex)
TEST_ENTRY (thread_rwlock)
TEST_ENTRY (thread_create)
(The remaining two file diffs — test/test-threadpool-cancel.c, +226 lines, and uv.gyp, +1 line — failed to render on this page.)

1 comment on commit 52c8a86

Contributor

ry commented on 52c8a86 Dec 10, 2012

sweet

Please sign in to comment.