@@ -37,11 +37,16 @@ static void uv__getaddrinfo_work(struct uv__work* w) {
}


static void uv__getaddrinfo_done(struct uv__work* w) {
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req);
struct addrinfo *res = req->res;
#if __sun
size_t hostlen = strlen(req->hostname);
size_t hostlen;

if (req->hostname)
hostlen = strlen(req->hostname);
else
hostlen = 0;
#endif

req->res = NULL;
@@ -58,6 +63,10 @@ static void uv__getaddrinfo_done(struct uv__work* w) {
else
assert(0);

req->hints = NULL;
req->service = NULL;
req->hostname = NULL;

if (req->retcode == 0) {
/* OK */
#if EAI_NODATA /* FreeBSD deprecated EAI_NODATA */
@@ -75,6 +84,12 @@ static void uv__getaddrinfo_done(struct uv__work* w) {
req->loop->last_err.sys_errno_ = req->retcode;
}

if (status == -UV_ECANCELED) {
assert(req->retcode == 0);
req->retcode = UV_ECANCELED;
uv__set_artificial_error(req->loop, UV_ECANCELED);
}

req->cb(req, req->retcode, res);
}

@@ -130,6 +130,7 @@ void uv__make_close_pending(uv_handle_t* handle);
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd);
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events);
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events);
void uv__io_close(uv_loop_t* loop, uv__io_t* w);
void uv__io_feed(uv_loop_t* loop, uv__io_t* w);
int uv__io_active(const uv__io_t* w, unsigned int events);
void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
@@ -163,7 +164,7 @@ int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);

/* timer */
void uv__run_timers(uv_loop_t* loop);
unsigned int uv__next_timeout(uv_loop_t* loop);
int uv__next_timeout(const uv_loop_t* loop);

/* signal */
void uv__signal_close(uv_signal_t* handle);
@@ -174,7 +175,7 @@ void uv__signal_loop_cleanup();
void uv__work_submit(uv_loop_t* loop,
struct uv__work *w,
void (*work)(struct uv__work *w),
void (*done)(struct uv__work *w));
void (*done)(struct uv__work *w, int status));
void uv__work_done(uv_async_t* handle, int status);

/* platform specific */
@@ -197,6 +198,13 @@ void uv__timer_close(uv_timer_t* handle);
void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle);

#if defined(__APPLE__)
int uv___stream_fd(uv_stream_t* handle);
#define uv__stream_fd(handle) (uv___stream_fd((uv_stream_t*) (handle)))
#else
#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
#endif /* defined(__APPLE__) */

#ifdef UV__O_NONBLOCK
# define UV__F_NONBLOCK UV__O_NONBLOCK
#else
@@ -57,7 +57,7 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
bound = 0;

/* Already bound? */
if (handle->io_watcher.fd >= 0) {
if (uv__stream_fd(handle) >= 0) {
uv__set_artificial_error(handle->loop, UV_EINVAL);
goto out;
}
@@ -117,13 +117,13 @@ int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
saved_errno = errno;
status = -1;

if (handle->io_watcher.fd == -1) {
if (uv__stream_fd(handle) == -1) {
uv__set_artificial_error(handle->loop, UV_EINVAL);
goto out;
}
assert(handle->io_watcher.fd >= 0);
assert(uv__stream_fd(handle) >= 0);

if ((status = listen(handle->io_watcher.fd, backlog)) == -1) {
if ((status = listen(uv__stream_fd(handle), backlog)) == -1) {
uv__set_sys_error(handle->loop, errno);
} else {
handle->connection_cb = cb;
@@ -172,7 +172,7 @@ void uv_pipe_connect(uv_connect_t* req,
int r;

saved_errno = errno;
new_sock = (handle->io_watcher.fd == -1);
new_sock = (uv__stream_fd(handle) == -1);
err = -1;

if (new_sock)
@@ -187,7 +187,8 @@ void uv_pipe_connect(uv_connect_t* req,
* is either there or not.
*/
do {
r = connect(handle->io_watcher.fd, (struct sockaddr*)&saddr, sizeof saddr);
r = connect(uv__stream_fd(handle),
(struct sockaddr*)&saddr, sizeof saddr);
}
while (r == -1 && errno == EINTR);

@@ -196,7 +197,7 @@ void uv_pipe_connect(uv_connect_t* req,

if (new_sock)
if (uv__stream_open((uv_stream_t*)handle,
handle->io_watcher.fd,
uv__stream_fd(handle),
UV_STREAM_READABLE | UV_STREAM_WRITABLE))
goto out;

@@ -233,7 +234,7 @@ static void uv__pipe_accept(uv_loop_t* loop, uv__io_t* w, unsigned int events) {

assert(pipe->type == UV_NAMED_PIPE);

sockfd = uv__accept(pipe->io_watcher.fd);
sockfd = uv__accept(uv__stream_fd(pipe));
if (sockfd == -1) {
if (errno != EAGAIN && errno != EWOULDBLOCK) {
uv__set_sys_error(pipe->loop, errno);
@@ -204,7 +204,7 @@ static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
if (container->flags & UV_INHERIT_FD) {
fd = container->data.fd;
} else {
fd = container->data.stream->io_watcher.fd;
fd = uv__stream_fd(container->data.stream);
}

if (fd == -1) {
@@ -363,10 +363,11 @@ int uv_spawn(uv_loop_t* loop,
int i;

assert(options.file != NULL);
assert(!(options.flags & ~(UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS |
UV_PROCESS_DETACHED |
assert(!(options.flags & ~(UV_PROCESS_DETACHED |
UV_PROCESS_SETGID |
UV_PROCESS_SETUID)));
UV_PROCESS_SETUID |
UV_PROCESS_WINDOWS_HIDE |
UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));

uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
ngx_queue_init(&process->queue);

Large diffs are not rendered by default.

@@ -37,7 +37,7 @@ int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) {
static int maybe_new_socket(uv_tcp_t* handle, int domain, int flags) {
int sockfd;

if (handle->io_watcher.fd != -1)
if (uv__stream_fd(handle) != -1)
return 0;

sockfd = uv__socket(domain, SOCK_STREAM, 0);
@@ -58,29 +58,21 @@ static int uv__bind(uv_tcp_t* tcp,
int domain,
struct sockaddr* addr,
int addrsize) {
int saved_errno;
int status;

saved_errno = errno;
status = -1;
int on;

if (maybe_new_socket(tcp, domain, UV_STREAM_READABLE|UV_STREAM_WRITABLE))
return -1;

tcp->delayed_error = 0;
if (bind(tcp->io_watcher.fd, addr, addrsize) == -1) {
if (errno == EADDRINUSE) {
tcp->delayed_error = errno;
} else {
uv__set_sys_error(tcp->loop, errno);
goto out;
}
}
status = 0;
on = 1;
if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)))
return uv__set_sys_error(tcp->loop, errno);

out:
errno = saved_errno;
return status;
errno = 0;
if (bind(tcp->io_watcher.fd, addr, addrsize) && errno != EADDRINUSE)
return uv__set_sys_error(tcp->loop, errno);

tcp->delayed_error = errno;
return 0;
}


@@ -105,7 +97,7 @@ static int uv__connect(uv_connect_t* req,
handle->delayed_error = 0;

do
r = connect(handle->io_watcher.fd, addr, addrlen);
r = connect(uv__stream_fd(handle), addr, addrlen);
while (r == -1 && errno == EINTR);

if (r == -1) {
@@ -174,7 +166,7 @@ int uv_tcp_getsockname(uv_tcp_t* handle, struct sockaddr* name,
goto out;
}

if (handle->io_watcher.fd < 0) {
if (uv__stream_fd(handle) < 0) {
uv__set_sys_error(handle->loop, EINVAL);
rv = -1;
goto out;
@@ -183,7 +175,7 @@ int uv_tcp_getsockname(uv_tcp_t* handle, struct sockaddr* name,
/* sizeof(socklen_t) != sizeof(int) on some systems. */
socklen = (socklen_t)*namelen;

if (getsockname(handle->io_watcher.fd, name, &socklen) == -1) {
if (getsockname(uv__stream_fd(handle), name, &socklen) == -1) {
uv__set_sys_error(handle->loop, errno);
rv = -1;
} else {
@@ -211,7 +203,7 @@ int uv_tcp_getpeername(uv_tcp_t* handle, struct sockaddr* name,
goto out;
}

if (handle->io_watcher.fd < 0) {
if (uv__stream_fd(handle) < 0) {
uv__set_sys_error(handle->loop, EINVAL);
rv = -1;
goto out;
@@ -220,7 +212,7 @@ int uv_tcp_getpeername(uv_tcp_t* handle, struct sockaddr* name,
/* sizeof(socklen_t) != sizeof(int) on some systems. */
socklen = (socklen_t)*namelen;

if (getpeername(handle->io_watcher.fd, name, &socklen) == -1) {
if (getpeername(uv__stream_fd(handle), name, &socklen) == -1) {
uv__set_sys_error(handle->loop, errno);
rv = -1;
} else {
@@ -320,8 +312,8 @@ int uv__tcp_keepalive(int fd, int on, unsigned int delay) {


int uv_tcp_nodelay(uv_tcp_t* handle, int on) {
if (handle->io_watcher.fd != -1)
if (uv__tcp_nodelay(handle->io_watcher.fd, on))
if (uv__stream_fd(handle) != -1)
if (uv__tcp_nodelay(uv__stream_fd(handle), on))
return -1;

if (on)
@@ -334,16 +326,16 @@ int uv_tcp_nodelay(uv_tcp_t* handle, int on) {


int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
if (handle->io_watcher.fd != -1)
if (uv__tcp_keepalive(handle->io_watcher.fd, on, delay))
if (uv__stream_fd(handle) != -1)
if (uv__tcp_keepalive(uv__stream_fd(handle), on, delay))
return -1;

if (on)
handle->flags |= UV_TCP_KEEPALIVE;
else
handle->flags &= ~UV_TCP_KEEPALIVE;

/* TODO Store delay if handle->io_watcher.fd == -1 but don't want to enlarge
/* TODO Store delay if uv__stream_fd(handle) == -1 but don't want to enlarge
* uv_tcp_t with an int that's almost never used...
*/

@@ -20,43 +20,48 @@
*/

#include "internal.h"
#include <stdlib.h>

#include <errno.h>
#include <pthread.h>

/* TODO add condvar support to libuv */
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_once_t once = PTHREAD_ONCE_INIT;
static pthread_t threads[4];
static uv_once_t once = UV_ONCE_INIT;
static uv_cond_t cond;
static uv_mutex_t mutex;
static uv_thread_t threads[4];
static ngx_queue_t exit_message;
static ngx_queue_t wq = { &wq, &wq };
static ngx_queue_t wq;
static volatile int initialized;


static void* worker(void* arg) {
static void uv__cancelled(struct uv__work* w) {
abort();
}


/* To avoid deadlock with uv_cancel() it's crucial that the worker
* never holds the global mutex and the loop-local mutex at the same time.
*/
static void worker(void* arg) {
struct uv__work* w;
ngx_queue_t* q;

(void) arg;

for (;;) {
if (pthread_mutex_lock(&mutex))
abort();
uv_mutex_lock(&mutex);

while (ngx_queue_empty(&wq))
if (pthread_cond_wait(&cond, &mutex))
abort();
uv_cond_wait(&cond, &mutex);

q = ngx_queue_head(&wq);

if (q == &exit_message)
pthread_cond_signal(&cond);
else
uv_cond_signal(&cond);
else {
ngx_queue_remove(q);
ngx_queue_init(q); /* Signal uv_cancel() that the work req is
executing. */
}

if (pthread_mutex_unlock(&mutex))
abort();
uv_mutex_unlock(&mutex);

if (q == &exit_message)
break;
@@ -65,36 +70,43 @@ static void* worker(void* arg) {
w->work(w);

uv_mutex_lock(&w->loop->wq_mutex);
w->work = NULL; /* Signal uv_cancel() that the work req is done
executing. */
ngx_queue_insert_tail(&w->loop->wq, &w->wq);
uv_mutex_unlock(&w->loop->wq_mutex);
uv_async_send(&w->loop->wq_async);
uv_mutex_unlock(&w->loop->wq_mutex);
}

return NULL;
}


static void post(ngx_queue_t* q) {
pthread_mutex_lock(&mutex);
uv_mutex_lock(&mutex);
ngx_queue_insert_tail(&wq, q);
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
uv_cond_signal(&cond);
uv_mutex_unlock(&mutex);
}


static void init_once(void) {
unsigned int i;

if (uv_cond_init(&cond))
abort();

if (uv_mutex_init(&mutex))
abort();

ngx_queue_init(&wq);

for (i = 0; i < ARRAY_SIZE(threads); i++)
if (pthread_create(threads + i, NULL, worker, NULL))
if (uv_thread_create(threads + i, worker, NULL))
abort();

initialized = 1;
}


#if defined(__GNUC__)
__attribute__((destructor))
static void cleanup(void) {
unsigned int i;
@@ -105,30 +117,59 @@ static void cleanup(void) {
post(&exit_message);

for (i = 0; i < ARRAY_SIZE(threads); i++)
if (pthread_join(threads[i], NULL))
if (uv_thread_join(threads + i))
abort();

uv_mutex_destroy(&mutex);
uv_cond_destroy(&cond);
initialized = 0;
}
#endif


void uv__work_submit(uv_loop_t* loop,
struct uv__work* w,
void (*work)(struct uv__work* w),
void (*done)(struct uv__work* w)) {
pthread_once(&once, init_once);
void (*done)(struct uv__work* w, int status)) {
uv_once(&once, init_once);
w->loop = loop;
w->work = work;
w->done = done;
post(&w->wq);
}


/* Try to cancel a work request that is still queued on the thread pool.
 *
 * Returns 0 on success, -1 when the request is already executing (or has
 * finished).  On success the request is moved straight to the loop's
 * finished-work queue with its work callback replaced by uv__cancelled,
 * so uv__work_done() later reports -UV_ECANCELED to the done callback.
 *
 * NOTE(review): this takes the global pool mutex and the loop's wq_mutex
 * together; the worker threads never hold both at once (see the comment
 * above worker()), which is what keeps this deadlock-free.
 */
int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
  int cancelled;

  uv_mutex_lock(&mutex);
  uv_mutex_lock(&w->loop->wq_mutex);

  /* Cancellable only if still linked into the pending queue and not yet
   * picked up: a worker empties w->wq (ngx_queue_init) when it dequeues
   * the request and clears w->work when the work has run.
   */
  cancelled = !ngx_queue_empty(&w->wq) && w->work != NULL;
  if (cancelled)
    ngx_queue_remove(&w->wq);

  uv_mutex_unlock(&w->loop->wq_mutex);
  uv_mutex_unlock(&mutex);

  if (!cancelled)
    return -1;

  /* Mark as cancelled (uv__cancelled aborts if it is ever actually run)
   * and hand the request to the loop's done queue for final dispatch.
   */
  w->work = uv__cancelled;
  uv_mutex_lock(&loop->wq_mutex);
  ngx_queue_insert_tail(&loop->wq, &w->wq);
  uv_mutex_unlock(&loop->wq_mutex);

  return 0;
}


void uv__work_done(uv_async_t* handle, int status) {
struct uv__work* w;
uv_loop_t* loop;
ngx_queue_t* q;
ngx_queue_t wq;
int err;

loop = container_of(handle, uv_loop_t, wq_async);
ngx_queue_init(&wq);
@@ -145,37 +186,71 @@ void uv__work_done(uv_async_t* handle, int status) {
ngx_queue_remove(q);

w = container_of(q, struct uv__work, wq);
w->done(w);
err = (w->work == uv__cancelled) ? -UV_ECANCELED : 0;
w->done(w, err);
}
}


static void uv__queue_work(struct uv__work* w) {
uv_work_t* req = container_of(w, uv_work_t, work_req);

if (req->work_cb)
req->work_cb(req);
req->work_cb(req);
}


static void uv__queue_done(struct uv__work* w) {
uv_work_t* req = container_of(w, uv_work_t, work_req);
static void uv__queue_done(struct uv__work* w, int status) {
uv_work_t* req;

req = container_of(w, uv_work_t, work_req);
uv__req_unregister(req->loop, req);

if (req->after_work_cb)
req->after_work_cb(req);
if (req->after_work_cb == NULL)
return;

if (status == -UV_ECANCELED)
uv__set_artificial_error(req->loop, UV_ECANCELED);

req->after_work_cb(req, status ? -1 : 0);
}


/* Submit a work request to the thread pool.
 *
 * work_cb runs on a pool thread; after_work_cb runs on the loop thread
 * once the work completes or is cancelled (after_work_cb may be NULL,
 * see uv__queue_done).  Returns 0 on success; when work_cb is NULL the
 * error is set to UV_EINVAL and uv__set_artificial_error's result is
 * returned.
 */
int uv_queue_work(uv_loop_t* loop,
                  uv_work_t* req,
                  uv_work_cb work_cb,
                  uv_after_work_cb after_work_cb) {
  if (work_cb == NULL)
    return uv__set_artificial_error(loop, UV_EINVAL);

  uv__req_init(loop, req, UV_WORK);
  req->loop = loop;
  req->work_cb = work_cb;
  req->after_work_cb = after_work_cb;
  uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done);
  return 0;
}


/* Cancel a pending threadpool-backed request.
 *
 * Only UV_FS, UV_GETADDRINFO and UV_WORK requests are backed by a
 * struct uv__work and can be cancelled; any other type returns -1.
 * Cancellation succeeds only if the request has not started executing
 * yet (see uv__work_cancel).
 */
int uv_cancel(uv_req_t* req) {
  struct uv__work* wreq;
  uv_loop_t* loop;

  /* Map the public request to its embedded uv__work and owning loop. */
  switch (req->type) {
  case UV_FS:
    loop = ((uv_fs_t*) req)->loop;
    wreq = &((uv_fs_t*) req)->work_req;
    break;
  case UV_GETADDRINFO:
    loop = ((uv_getaddrinfo_t*) req)->loop;
    wreq = &((uv_getaddrinfo_t*) req)->work_req;
    break;
  case UV_WORK:
    loop = ((uv_work_t*) req)->loop;
    wreq = &((uv_work_t*) req)->work_req;
    break;
  default:
    return -1;  /* Request type is not cancellable. */
  }

  return uv__work_cancel(loop, req, wreq);
}
@@ -102,13 +102,14 @@ int64_t uv_timer_get_repeat(uv_timer_t* handle) {
}


unsigned int uv__next_timeout(uv_loop_t* loop) {
uv_timer_t* handle;
int uv__next_timeout(const uv_loop_t* loop) {
const uv_timer_t* handle;

handle = RB_MIN(uv__timers, &loop->timer_handles);
/* RB_MIN expects a non-const tree root. That's okay, it doesn't modify it. */
handle = RB_MIN(uv__timers, (struct uv__timers*) &loop->timer_handles);

if (handle == NULL)
return (unsigned int) -1; /* block indefinitely */
return -1; /* block indefinitely */

if (handle->timeout <= loop->time)
return 0;
@@ -54,7 +54,7 @@ int uv_tty_set_mode(uv_tty_t* tty, int mode) {
struct termios raw;
int fd;

fd = tty->io_watcher.fd;
fd = uv__stream_fd(tty);

if (mode && tty->mode == 0) {
/* on */
@@ -105,7 +105,7 @@ int uv_tty_set_mode(uv_tty_t* tty, int mode) {
int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
struct winsize ws;

if (ioctl(tty->io_watcher.fd, TIOCGWINSZ, &ws) < 0) {
if (ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws) < 0) {
uv__set_sys_error(tty->loop, errno);
return -1;
}
@@ -40,7 +40,7 @@ static int uv__udp_send(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t bufs[],


void uv__udp_close(uv_udp_t* handle) {
uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLIN | UV__POLLOUT);
uv__io_close(handle->loop, &handle->io_watcher);
uv__handle_stop(handle);
close(handle->io_watcher.fd);
handle->io_watcher.fd = -1;
@@ -171,6 +171,16 @@ void uv_loop_delete(uv_loop_t* loop) {
}


/* Windows: there is no single pollable backend fd to expose, so this
 * always returns -1. */
int uv_backend_fd(const uv_loop_t* loop) {
  return -1;
}


/* Windows: always reports a zero timeout.  NOTE(review): presumably a
 * stub until a real computation is wired up - confirm intended
 * semantics against the unix uv__next_timeout(). */
int uv_backend_timeout(const uv_loop_t* loop) {
  return 0;
}


static void uv_poll(uv_loop_t* loop, int block) {
BOOL success;
DWORD bytes, timeout;
@@ -109,6 +109,7 @@ uv_err_code uv_translate_sys_error(int sys_errno) {
case WSAECONNRESET: return UV_ECONNRESET;
case ERROR_ALREADY_EXISTS: return UV_EEXIST;
case ERROR_FILE_EXISTS: return UV_EEXIST;
case ERROR_BUFFER_OVERFLOW: return UV_EFAULT;
case WSAEFAULT: return UV_EFAULT;
case ERROR_HOST_UNREACHABLE: return UV_EHOSTUNREACH;
case WSAEHOSTUNREACH: return UV_EHOSTUNREACH;
@@ -125,6 +126,7 @@ uv_err_code uv_translate_sys_error(int sys_errno) {
case ERROR_NETWORK_UNREACHABLE: return UV_ENETUNREACH;
case WSAENETUNREACH: return UV_ENETUNREACH;
case WSAENOBUFS: return UV_ENOBUFS;
case ERROR_NOT_ENOUGH_MEMORY: return UV_ENOMEM;
case ERROR_OUTOFMEMORY: return UV_ENOMEM;
case ERROR_CANNOT_MAKE: return UV_ENOSPC;
case ERROR_DISK_FULL: return UV_ENOSPC;
@@ -777,10 +777,11 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
}

assert(options.file != NULL);
assert(!(options.flags & ~(UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS |
UV_PROCESS_DETACHED |
assert(!(options.flags & ~(UV_PROCESS_DETACHED |
UV_PROCESS_SETGID |
UV_PROCESS_SETUID)));
UV_PROCESS_SETUID |
UV_PROCESS_WINDOWS_HIDE |
UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));

uv_process_init(loop, process);
process->exit_cb = options.exit_cb;
@@ -872,13 +873,22 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
startup.lpReserved = NULL;
startup.lpDesktop = NULL;
startup.lpTitle = NULL;
startup.dwFlags = STARTF_USESTDHANDLES;
startup.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;

startup.cbReserved2 = uv__stdio_size(process->child_stdio_buffer);
startup.lpReserved2 = (BYTE*) process->child_stdio_buffer;

startup.hStdInput = uv__stdio_handle(process->child_stdio_buffer, 0);
startup.hStdOutput = uv__stdio_handle(process->child_stdio_buffer, 1);
startup.hStdError = uv__stdio_handle(process->child_stdio_buffer, 2);

if (options.flags & UV_PROCESS_WINDOWS_HIDE) {
/* Use SW_HIDE to avoid any potential process window. */
startup.wShowWindow = SW_HIDE;
} else {
startup.wShowWindow = SW_SHOWDEFAULT;
}

process_flags = CREATE_UNICODE_ENVIRONMENT;
if (options.flags & UV_PROCESS_DETACHED) {
process_flags |= DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP;
@@ -55,6 +55,9 @@ static DWORD WINAPI uv_work_thread_proc(void* parameter) {

int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb,
uv_after_work_cb after_work_cb) {
if (work_cb == NULL)
return uv__set_artificial_error(loop, UV_EINVAL);

uv_work_req_init(loop, req, work_cb, after_work_cb);

if (!QueueUserWorkItem(&uv_work_thread_proc, req, WT_EXECUTELONGFUNCTION)) {
@@ -67,8 +70,13 @@ int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb,
}


/* Request cancellation is not supported on the Windows
 * QueueUserWorkItem-based thread pool; always fails with -1. */
int uv_cancel(uv_req_t* req) {
  return -1;
}


void uv_process_work_req(uv_loop_t* loop, uv_work_t* req) {
uv__req_unregister(loop, req);
if(req->after_work_cb)
req->after_work_cb(req);
req->after_work_cb(req, 0);
}
@@ -740,125 +740,207 @@ void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
}


uv_err_t uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
unsigned long size = 0;
IP_ADAPTER_ADDRESSES* adapter_addresses;
IP_ADAPTER_ADDRESSES* adapter_address;
uv_interface_address_t* address;
struct sockaddr* sock_addr;
int length;
char* name;
/* Use IP_ADAPTER_UNICAST_ADDRESS_XP to retain backwards compatibility */
/* with Windows XP */
IP_ADAPTER_UNICAST_ADDRESS_XP* unicast_address;
uv_err_t uv_interface_addresses(uv_interface_address_t** addresses_ptr,
int* count_ptr) {
IP_ADAPTER_ADDRESSES* win_address_buf;
ULONG win_address_buf_size;
IP_ADAPTER_ADDRESSES* win_address;

if (GetAdaptersAddresses(AF_UNSPEC, 0, NULL, NULL, &size)
!= ERROR_BUFFER_OVERFLOW) {
return uv__new_sys_error(GetLastError());
}
uv_interface_address_t* uv_address_buf;
char* name_buf;
size_t uv_address_buf_size;
uv_interface_address_t* uv_address;

adapter_addresses = (IP_ADAPTER_ADDRESSES*)malloc(size);
if (!adapter_addresses) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
int count;

if (GetAdaptersAddresses(AF_UNSPEC, 0, NULL, adapter_addresses, &size)
!= ERROR_SUCCESS) {
return uv__new_sys_error(GetLastError());
}
/* Fetch the size of the adapters reported by windows, and then get the */
/* list itself. */
win_address_buf_size = 0;
win_address_buf = NULL;

/* Count the number of interfaces */
*count = 0;
for (;;) {
ULONG r;

for (adapter_address = adapter_addresses;
adapter_address != NULL;
adapter_address = adapter_address->Next) {
/* If win_address_buf is 0, then GetAdaptersAddresses will fail with */
/* ERROR_BUFFER_OVERFLOW, and the required buffer size will be stored in */
/* win_address_buf_size. */
r = GetAdaptersAddresses(AF_UNSPEC,
0,
NULL,
win_address_buf,
&win_address_buf_size);

if (adapter_address->OperStatus != IfOperStatusUp)
continue;
if (r == ERROR_SUCCESS)
break;

free(win_address_buf);

switch (r) {
case ERROR_BUFFER_OVERFLOW:
/* This happens when win_address_buf is NULL or too small to hold */
/* all adapters. */
win_address_buf = malloc(win_address_buf_size);
if (win_address_buf == NULL)
return uv__new_artificial_error(UV_ENOMEM);

continue;

unicast_address = (IP_ADAPTER_UNICAST_ADDRESS_XP*)
adapter_address->FirstUnicastAddress;
case ERROR_NO_DATA: {
/* No adapters were found. */
uv_address_buf = malloc(1);
if (uv_address_buf == NULL)
return uv__new_artificial_error(UV_ENOMEM);

while (unicast_address) {
(*count)++;
unicast_address = unicast_address->Next;
*count_ptr = 0;
*addresses_ptr = uv_address_buf;

return uv_ok_;
}

case ERROR_ADDRESS_NOT_ASSOCIATED:
return uv__new_artificial_error(UV_EAGAIN);

case ERROR_INVALID_PARAMETER:
/* MSDN says:
* "This error is returned for any of the following conditions: the
* SizePointer parameter is NULL, the Address parameter is not
* AF_INET, AF_INET6, or AF_UNSPEC, or the address information for
* the parameters requested is greater than ULONG_MAX."
* Since the first two conditions are not met, it must be that the
* adapter data is too big.
*/
return uv__new_artificial_error(UV_ENOBUFS);

default:
/* Other (unspecified) errors can happen, but we don't have any */
/* special meaning for them. */
assert(r != ERROR_SUCCESS);
return uv__new_sys_error(r);
}
}

*addresses = (uv_interface_address_t*)
malloc(*count * sizeof(uv_interface_address_t));
if (!(*addresses)) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
/* Count the number of enabled interfaces and compute how much space is */
/* needed to store their info. */
count = 0;
uv_address_buf_size = 0;

for (win_address = win_address_buf;
win_address != NULL;
win_address = win_address->Next) {
/* Use IP_ADAPTER_UNICAST_ADDRESS_XP to retain backwards compatibility */
/* with Windows XP */
IP_ADAPTER_UNICAST_ADDRESS_XP* unicast_address;
int name_size;

/* Interfaces that are not 'up' should not be reported. Also skip */
/* interfaces that have no associated unicast address, as to avoid */
/* allocating space for the name for this interface. */
if (win_address->OperStatus != IfOperStatusUp ||
win_address->FirstUnicastAddress == NULL)
continue;

/* Compute the size of the interface name. */
name_size = WideCharToMultiByte(CP_UTF8,
0,
win_address->FriendlyName,
-1,
NULL,
0,
NULL,
FALSE);
if (name_size <= 0) {
free(win_address_buf);
return uv__new_sys_error(GetLastError());
}
uv_address_buf_size += name_size;

/* Count the number of addresses associated with this interface, and */
/* compute the size. */
for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS_XP*)
win_address->FirstUnicastAddress;
unicast_address != NULL;
unicast_address = unicast_address->Next) {
count++;
uv_address_buf_size += sizeof(uv_interface_address_t);
}
}

address = *addresses;
/* Allocate space to store interface data plus adapter names. */
uv_address_buf = malloc(uv_address_buf_size);
if (uv_address_buf == NULL) {
free(win_address_buf);
return uv__new_artificial_error(UV_ENOMEM);
}

for (adapter_address = adapter_addresses;
adapter_address != NULL;
adapter_address = adapter_address->Next) {
/* Compute the start of the uv_interface_address_t array, and the place in */
/* the buffer where the interface names will be stored. */
uv_address = uv_address_buf;
name_buf = (char*) (uv_address_buf + count);

if (adapter_address->OperStatus != IfOperStatusUp)
/* Fill out the output buffer. */
for (win_address = win_address_buf;
win_address != NULL;
win_address = win_address->Next) {
IP_ADAPTER_UNICAST_ADDRESS_XP* unicast_address;
int name_size;
size_t max_name_size;

if (win_address->OperStatus != IfOperStatusUp ||
win_address->FirstUnicastAddress == NULL)
continue;

name = NULL;
unicast_address = (IP_ADAPTER_UNICAST_ADDRESS_XP*)
adapter_address->FirstUnicastAddress;
/* Convert the interface name to UTF8. */
max_name_size = (char*) uv_address_buf + uv_address_buf_size - name_buf;
if (max_name_size > (size_t) INT_MAX)
max_name_size = INT_MAX;
name_size = WideCharToMultiByte(CP_UTF8,
0,
win_address->FriendlyName,
-1,
name_buf,
(int) max_name_size,
NULL,
FALSE);
if (name_size <= 0) {
free(win_address_buf);
free(uv_address_buf);
return uv__new_sys_error(GetLastError());
}

while (unicast_address) {
sock_addr = unicast_address->Address.lpSockaddr;
if (sock_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6 *)sock_addr);
} else {
address->address.address4 = *((struct sockaddr_in *)sock_addr);
}
/* Add an uv_interface_address_t element for every unicast address. */
for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS_XP*)
win_address->FirstUnicastAddress;
unicast_address != NULL;
unicast_address = unicast_address->Next) {
struct sockaddr* sa;

address->is_internal =
adapter_address->IfType == IF_TYPE_SOFTWARE_LOOPBACK ? 1 : 0;

if (!name) {
/* Convert FriendlyName to utf8 */
length = uv_utf16_to_utf8(adapter_address->FriendlyName, -1, NULL, 0);
if (length) {
name = (char*)malloc(length);
if (!name) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}

if (!uv_utf16_to_utf8(adapter_address->FriendlyName, -1, name,
length)) {
free(name);
name = NULL;
}
}
}
uv_address->name = name_buf;

sa = unicast_address->Address.lpSockaddr;
if (sa->sa_family == AF_INET6)
uv_address->address.address6 = *((struct sockaddr_in6 *) sa);
else
uv_address->address.address4 = *((struct sockaddr_in *) sa);

assert(name);
address->name = name;
uv_address->is_internal =
(win_address->IfType == IF_TYPE_SOFTWARE_LOOPBACK);

unicast_address = unicast_address->Next;
address++;
uv_address++;
}

name_buf += name_size;
}

free(adapter_addresses);
free(win_address_buf);

*addresses_ptr = uv_address_buf;
*count_ptr = count;

return uv_ok_;
}


/* Free the array returned by uv_interface_addresses().
 *
 * uv_interface_addresses() makes a single allocation (uv_address_buf)
 * that holds both the uv_interface_address_t array and, after it, every
 * adapter name string; each element's `name` is an interior pointer into
 * that same buffer.  Therefore the ONLY pointer that may be passed to
 * free() is the array base itself.  The previous implementation called
 * free() on each distinct addresses[i].name - interior pointers never
 * returned by malloc() - which is undefined behavior (heap corruption).
 */
void uv_free_interface_addresses(uv_interface_address_t* addresses,
    int count) {
  (void) count;  /* Kept for API compatibility; nothing per-element to do. */
  free(addresses);
}
@@ -82,9 +82,6 @@ static void producer(void* arg) {
uv_cond_signal(&full);
uv_mutex_unlock(&mutex);
}

LOGF("finished_consumers: %d\n", finished_consumers);
ASSERT(finished_consumers == MAX_CONSUMERS);
}


@@ -129,6 +126,10 @@ TEST_IMPL(consumer_producer) {
}

ASSERT(0 == uv_thread_join(&pthread));

LOGF("finished_consumers: %d\n", finished_consumers);
ASSERT(finished_consumers == MAX_CONSUMERS);

uv_cond_destroy(&empty);
uv_cond_destroy(&full);
uv_mutex_destroy(&mutex);
@@ -0,0 +1,132 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#include "uv.h"
#include "task.h"
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#ifndef HAVE_KQUEUE
# if __APPLE__ || __DragonFly__ || __FreeBSD__ || __OpenBSD__ || __NetBSD__
# define HAVE_KQUEUE 1
# endif
#endif

#ifndef HAVE_EPOLL
# if defined(__linux__)
# define HAVE_EPOLL 1
# endif
#endif

#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)

#if defined(HAVE_KQUEUE)
# include <sys/types.h>
# include <sys/event.h>
# include <sys/time.h>
#endif

#if defined(HAVE_EPOLL)
# include <sys/epoll.h>
#endif

static uv_thread_t embed_thread;
static uv_sem_t embed_sem;
static uv_timer_t embed_timer;
static uv_async_t embed_async;
static volatile int embed_closed;

static int embed_timer_called;


/* Poller thread for the embed test: blocks on the default loop's backend
 * fd (kqueue or epoll) using the loop-provided timeout, then wakes the
 * loop side through embed_async and waits until embed_cb has processed
 * one iteration before polling again.
 */
static void embed_thread_runner(void* arg) {
  int r;
  int fd;
  int timeout;

  while (!embed_closed) {
    fd = uv_backend_fd(uv_default_loop());
    /* NOTE(review): presumably -1 means "block indefinitely" (see
     * uv__next_timeout); the kqueue timespec math below does not handle
     * a negative timeout specially - confirm. */
    timeout = uv_backend_timeout(uv_default_loop());

    do {
#if defined(HAVE_KQUEUE)
      struct timespec ts;
      ts.tv_sec = timeout / 1000;
      ts.tv_nsec = (timeout % 1000) * 1000000;
      r = kevent(fd, NULL, 0, NULL, 0, &ts);
#elif defined(HAVE_EPOLL)
      r = epoll_wait(fd, NULL, 0, timeout);
#endif
    } while (r == -1 && errno == EINTR);  /* Retry if interrupted by a signal. */
    uv_async_send(&embed_async);
    uv_sem_wait(&embed_sem);  /* Wait for embed_cb to finish one loop turn. */
  }
}


/* Runs when the poller thread signals activity on the default loop's
 * backend: process one iteration of the default loop, then release the
 * poller thread so it can block again. */
static void embed_cb(uv_async_t* async, int status) {
  uv_run_once(uv_default_loop());

  uv_sem_post(&embed_sem);
}


/* One-shot timer on the default loop: record that it fired, tell the
 * poller thread to stop, and close the async handle so both loops can
 * wind down. */
static void embed_timer_cb(uv_timer_t* timer, int status) {
  embed_timer_called++;
  embed_closed = 1;

  uv_close((uv_handle_t*) &embed_async, NULL);
}
#endif


/* Embedding test: drive libuv's default loop from an external loop by
 * polling the default loop's backend fd on a helper thread.  The test
 * passes when the timer scheduled on the default loop fires exactly once
 * even though only the external loop is run directly.  Compiled to a
 * no-op on platforms without kqueue or epoll.
 */
TEST_IMPL(embed) {
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
  uv_loop_t* external;

  external = uv_loop_new();
  ASSERT(external != NULL);

  embed_timer_called = 0;
  embed_closed = 0;

  /* The async handle lives on the external loop; the poller thread uses
   * it to hand control back whenever the default loop has work. */
  uv_async_init(external, &embed_async, embed_cb);

  /* Start timer in default loop */
  uv_timer_init(uv_default_loop(), &embed_timer);
  uv_timer_start(&embed_timer, embed_timer_cb, 250, 0);

  /* Start worker that will interrupt external loop */
  uv_sem_init(&embed_sem, 0);
  uv_thread_create(&embed_thread, embed_thread_runner, NULL);

  /* But run external loop */
  uv_run(external);

  uv_thread_join(&embed_thread);
  uv_loop_delete(external);

  ASSERT(embed_timer_called == 1);
#endif

  return 0;
}
@@ -67,6 +67,7 @@ TEST_DECLARE (tcp_flags)
TEST_DECLARE (tcp_write_error)
TEST_DECLARE (tcp_write_to_half_open_connection)
TEST_DECLARE (tcp_unexpected_read)
TEST_DECLARE (tcp_read_stop)
TEST_DECLARE (tcp_bind6_error_addrinuse)
TEST_DECLARE (tcp_bind6_error_addrnotavail)
TEST_DECLARE (tcp_bind6_error_fault)
@@ -124,6 +125,7 @@ TEST_DECLARE (pipe_ref3)
TEST_DECLARE (pipe_ref4)
TEST_DECLARE (process_ref)
TEST_DECLARE (active)
TEST_DECLARE (embed)
TEST_DECLARE (async)
TEST_DECLARE (get_currentexe)
TEST_DECLARE (process_title)
@@ -184,7 +186,11 @@ TEST_DECLARE (fs_readdir_file)
TEST_DECLARE (fs_open_dir)
TEST_DECLARE (fs_rename_to_existing_file)
TEST_DECLARE (threadpool_queue_work_simple)
TEST_DECLARE (threadpool_queue_work_einval)
TEST_DECLARE (threadpool_multiple_event_loops)
TEST_DECLARE (threadpool_cancel_getaddrinfo)
TEST_DECLARE (threadpool_cancel_work)
TEST_DECLARE (threadpool_cancel_fs)
TEST_DECLARE (thread_mutex)
TEST_DECLARE (thread_rwlock)
TEST_DECLARE (thread_create)
@@ -284,6 +290,9 @@ TASK_LIST_START
TEST_ENTRY (tcp_write_to_half_open_connection)
TEST_ENTRY (tcp_unexpected_read)

TEST_ENTRY (tcp_read_stop)
TEST_HELPER (tcp_read_stop, tcp4_echo_server)

TEST_ENTRY (tcp_bind6_error_addrinuse)
TEST_ENTRY (tcp_bind6_error_addrnotavail)
TEST_ENTRY (tcp_bind6_error_fault)
@@ -362,6 +371,8 @@ TASK_LIST_START

TEST_ENTRY (active)

TEST_ENTRY (embed)

TEST_ENTRY (async)

TEST_ENTRY (get_currentexe)
@@ -448,7 +459,11 @@ TASK_LIST_START
TEST_ENTRY (fs_open_dir)
TEST_ENTRY (fs_rename_to_existing_file)
TEST_ENTRY (threadpool_queue_work_simple)
TEST_ENTRY (threadpool_queue_work_einval)
TEST_ENTRY (threadpool_multiple_event_loops)
TEST_ENTRY (threadpool_cancel_getaddrinfo)
TEST_ENTRY (threadpool_cancel_work)
TEST_ENTRY (threadpool_cancel_fs)
TEST_ENTRY (thread_mutex)
TEST_ENTRY (thread_rwlock)
TEST_ENTRY (thread_create)
@@ -0,0 +1,73 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#include "uv.h"
#include "task.h"

static uv_timer_t timer_handle;
static uv_tcp_t tcp_handle;
static uv_write_t write_req;


/* Alloc/read callback that must never fire: after uv_read_stop() (and
 * before the echo server replies) no read events may be delivered. */
static void fail_cb(void) {
  ASSERT(0 && "fail_cb called");
}


/* The PING has been written; close both handles so uv_run() can exit. */
static void write_cb(uv_write_t* req, int status) {
  (void) req;
  (void) status;

  /* The two closes are independent; order does not matter. */
  uv_close((uv_handle_t*) &tcp_handle, NULL);
  uv_close((uv_handle_t*) &timer_handle, NULL);
}


/* Fires once, 50 ms after the connection is made: queue a write and stop
 * reading. fail_cb guards that no read callback sneaks in afterwards. */
static void timer_cb(uv_timer_t* handle, int status) {
  uv_stream_t* stream;
  uv_buf_t buf;

  (void) handle;
  (void) status;

  stream = (uv_stream_t*) &tcp_handle;
  buf = uv_buf_init("PING", 4);

  ASSERT(0 == uv_write(&write_req, stream, &buf, 1, write_cb));
  ASSERT(0 == uv_read_stop(stream));
}


/* Connection established: start reading (with callbacks that must never
 * fire) and arm the one-shot timer that will stop the read again. */
static void connect_cb(uv_connect_t* req, int status) {
  int r;

  (void) req;
  ASSERT(status == 0);

  r = uv_timer_start(&timer_handle, timer_cb, 50, 0);
  ASSERT(r == 0);

  r = uv_read_start((uv_stream_t*) &tcp_handle,
                    (uv_alloc_cb) fail_cb,
                    (uv_read_cb) fail_cb);
  ASSERT(r == 0);
}


TEST_IMPL(tcp_read_stop) {
uv_connect_t connect_req;
struct sockaddr_in addr;

addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
ASSERT(0 == uv_timer_init(uv_default_loop(), &timer_handle));
ASSERT(0 == uv_tcp_init(uv_default_loop(), &tcp_handle));
ASSERT(0 == uv_tcp_connect(&connect_req, &tcp_handle, addr, connect_cb));
ASSERT(0 == uv_run(uv_default_loop()));
MAKE_VALGRIND_HAPPY();

return 0;
}
@@ -0,0 +1,266 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#include "uv.h"
#include "task.h"

/* Record an array of requests so timer_cb can cancel each one. stride is
 * the element size, which lets the cancel loop walk the array through a
 * void* without knowing the concrete request type (uv_work_t, uv_fs_t,
 * uv_getaddrinfo_t, ...). */
#define INIT_CANCEL_INFO(ci, what)                                            \
  do {                                                                        \
    (ci)->reqs = (what);                                                      \
    (ci)->nreqs = ARRAY_SIZE(what);                                           \
    (ci)->stride = sizeof((what)[0]);                                         \
  }                                                                           \
  while (0)

struct cancel_info {
  void* reqs;               /* Base address of the request array. */
  unsigned nreqs;           /* Number of requests in the array. */
  unsigned stride;          /* sizeof() one array element. */
  uv_timer_t timer_handle;  /* Fires once everything is queued; cancels. */
};

/* Synchronization used by saturate_threadpool()/unblock_threadpool(). */
static uv_cond_t signal_cond;
static uv_mutex_t signal_mutex;
static uv_mutex_t wait_mutex;
static unsigned num_threads;          /* Pool size discovered at runtime. */
static unsigned fs_cb_called;
static unsigned work_cb_called;
static unsigned done_cb_called;
static unsigned done2_cb_called;
static unsigned timer_cb_called;
static unsigned getaddrinfo_cb_called;


/* Runs on a threadpool thread. Signals the main thread that this worker
 * has started, then parks on wait_mutex (held locked by the main thread)
 * until unblock_threadpool() releases it. This is how
 * saturate_threadpool() pins every pool thread.
 *
 * NOTE(review): work_cb_called is incremented from multiple threads
 * without synchronization — presumably relies on the final read happening
 * only after uv_run() has drained all done callbacks; confirm. */
static void work_cb(uv_work_t* req) {
  uv_mutex_lock(&signal_mutex);
  uv_cond_signal(&signal_cond);
  uv_mutex_unlock(&signal_mutex);

  uv_mutex_lock(&wait_mutex);
  uv_mutex_unlock(&wait_mutex);

  work_cb_called++;
}


/* Runs on the loop thread after a saturating work request finishes (or,
 * for the last one, is cancelled). Frees the request heap-allocated in
 * saturate_threadpool(). */
static void done_cb(uv_work_t* req, int status) {
  done_cb_called++;
  free(req);
}


/* Fill the threadpool with work requests that block in work_cb until
 * unblock_threadpool() is called. Afterwards, any request submitted to
 * the pool stays queued — and therefore cancellable — until unblocked.
 * Leaves signal_mutex and wait_mutex locked; num_threads ends up holding
 * the discovered pool size. */
static void saturate_threadpool(void) {
  uv_work_t* req;

  ASSERT(0 == uv_cond_init(&signal_cond));
  ASSERT(0 == uv_mutex_init(&signal_mutex));
  ASSERT(0 == uv_mutex_init(&wait_mutex));

  uv_mutex_lock(&signal_mutex);
  uv_mutex_lock(&wait_mutex);

  for (num_threads = 0; /* empty */; num_threads++) {
    req = malloc(sizeof(*req));
    ASSERT(req != NULL);
    ASSERT(0 == uv_queue_work(uv_default_loop(), req, work_cb, done_cb));

    /* Expect to get signalled within 350 ms, otherwise assume that
     * the thread pool is saturated. As with any timing dependent test,
     * this is obviously not ideal.
     */
    if (uv_cond_timedwait(&signal_cond, &signal_mutex, 350 * 1e6)) {
      /* This request never reached a worker; cancel it so done_cb still
       * runs (hence the "+1" in cleanup_threadpool's accounting). */
      ASSERT(0 == uv_cancel((uv_req_t*) req));
      break;
    }
  }
}


/* Release the mutexes that saturate_threadpool() left locked, letting
 * every parked work_cb run to completion. */
static void unblock_threadpool(void) {
  uv_mutex_unlock(&signal_mutex);
  uv_mutex_unlock(&wait_mutex);
}


/* Verify every saturating request completed and tear down the
 * synchronization primitives. Call only after uv_run() has drained the
 * loop so all done callbacks have fired. */
static void cleanup_threadpool(void) {
  ASSERT(done_cb_called == num_threads + 1);  /* +1 == cancelled work req. */
  ASSERT(work_cb_called == num_threads);

  uv_cond_destroy(&signal_cond);
  uv_mutex_destroy(&signal_mutex);
  uv_mutex_destroy(&wait_mutex);
}


/* Every fs request in threadpool_cancel_fs is cancelled before a worker
 * can run it, so each callback must observe UV_ECANCELED. */
static void fs_cb(uv_fs_t* req) {
  ASSERT(req->errorno == UV_ECANCELED);
  uv_fs_req_cleanup(req);
  fs_cb_called++;
}


/* Every getaddrinfo request is cancelled before a worker picks it up;
 * each callback must report UV_ECANCELED both via the loop's last error
 * and via the status argument. */
static void getaddrinfo_cb(uv_getaddrinfo_t* req,
                           int status,
                           struct addrinfo* res) {
  (void) res;

  ASSERT(uv_last_error(req->loop).code == UV_ECANCELED);
  ASSERT(status == UV_ECANCELED);
  getaddrinfo_cb_called++;
}


/* Work callback for requests that are always cancelled while still
 * queued — reaching it means cancellation failed. */
static void work2_cb(uv_work_t* req) {
  ASSERT(0 && "work2_cb called");
}


/* After-work callback for cancelled work requests: status must be -1 and
 * the loop's last error UV_ECANCELED. */
static void done2_cb(uv_work_t* req, int status) {
  ASSERT(uv_last_error(req->loop).code == UV_ECANCELED);
  ASSERT(status == -1);
  done2_cb_called++;
}


/* Fires once the threadpool is saturated and all test requests are
 * queued: cancel every pending request, close the timer, then release
 * the pool threads so the cancelled callbacks can be delivered. */
static void timer_cb(uv_timer_t* handle, int status) {
  struct cancel_info* ci;
  char* base;
  unsigned n;

  (void) status;

  ci = container_of(handle, struct cancel_info, timer_handle);
  base = ci->reqs;

  for (n = 0; n < ci->nreqs; n++)
    ASSERT(0 == uv_cancel((uv_req_t*) (base + n * ci->stride)));

  uv_close((uv_handle_t*) &ci->timer_handle, NULL);
  unblock_threadpool();
  timer_cb_called++;
}


/* Queue four getaddrinfo requests (covering the hostname/service/hints
 * argument combinations) against a saturated threadpool, then cancel
 * them all from timer_cb and check each callback reports UV_ECANCELED. */
TEST_IMPL(threadpool_cancel_getaddrinfo) {
  uv_getaddrinfo_t reqs[4];
  struct cancel_info ci;
  /* FIX: hints was previously passed to uv_getaddrinfo() uninitialized,
   * so the request copied indeterminate memory. Zero-initialize it: an
   * all-zero addrinfo (AF_UNSPEC, no flags) imposes no constraints. */
  struct addrinfo hints = { 0 };
  uv_loop_t* loop;
  int r;

  INIT_CANCEL_INFO(&ci, reqs);
  loop = uv_default_loop();
  saturate_threadpool();

  r = uv_getaddrinfo(loop, reqs + 0, getaddrinfo_cb, "fail", NULL, NULL);
  ASSERT(r == 0);

  r = uv_getaddrinfo(loop, reqs + 1, getaddrinfo_cb, NULL, "fail", NULL);
  ASSERT(r == 0);

  r = uv_getaddrinfo(loop, reqs + 2, getaddrinfo_cb, "fail", "fail", NULL);
  ASSERT(r == 0);

  r = uv_getaddrinfo(loop, reqs + 3, getaddrinfo_cb, "fail", NULL, &hints);
  ASSERT(r == 0);

  ASSERT(0 == uv_timer_init(loop, &ci.timer_handle));
  ASSERT(0 == uv_timer_start(&ci.timer_handle, timer_cb, 10, 0));
  ASSERT(0 == uv_run(loop));
  ASSERT(1 == timer_cb_called);

  cleanup_threadpool();

  return 0;
}


/* Queue 16 work requests against a saturated threadpool, cancel them all
 * from timer_cb, and check that every after-work callback sees the
 * cancelled status while no work callback ever runs. */
TEST_IMPL(threadpool_cancel_work) {
  struct cancel_info ci;
  uv_work_t reqs[16];
  uv_loop_t* loop;
  unsigned i;

  INIT_CANCEL_INFO(&ci, reqs);
  loop = uv_default_loop();
  saturate_threadpool();

  for (i = 0; i < ARRAY_SIZE(reqs); i++) {
    int r = uv_queue_work(loop, &reqs[i], work2_cb, done2_cb);
    ASSERT(r == 0);
  }

  ASSERT(0 == uv_timer_init(loop, &ci.timer_handle));
  ASSERT(0 == uv_timer_start(&ci.timer_handle, timer_cb, 10, 0));
  ASSERT(0 == uv_run(loop));
  ASSERT(timer_cb_called == 1);
  ASSERT(done2_cb_called == ARRAY_SIZE(reqs));

  cleanup_threadpool();

  return 0;
}


/* Queue one of (almost) every async fs operation against a saturated
 * threadpool, cancel them all from timer_cb, and check each callback
 * observes UV_ECANCELED. The bogus arguments ("/", fd 0, NULL buffers)
 * are never used because no request ever reaches a worker thread. */
TEST_IMPL(threadpool_cancel_fs) {
  struct cancel_info ci;
  uv_fs_t reqs[25];
  uv_loop_t* loop;
  unsigned n;

  INIT_CANCEL_INFO(&ci, reqs);
  loop = uv_default_loop();
  saturate_threadpool();

  /* The number of calls below must match ARRAY_SIZE(reqs) (== 25). */
  n = 0;
  ASSERT(0 == uv_fs_chmod(loop, reqs + n++, "/", 0, fs_cb));
  ASSERT(0 == uv_fs_chown(loop, reqs + n++, "/", 0, 0, fs_cb));
  ASSERT(0 == uv_fs_close(loop, reqs + n++, 0, fs_cb));
  ASSERT(0 == uv_fs_fchmod(loop, reqs + n++, 0, 0, fs_cb));
  ASSERT(0 == uv_fs_fchown(loop, reqs + n++, 0, 0, 0, fs_cb));
  ASSERT(0 == uv_fs_fdatasync(loop, reqs + n++, 0, fs_cb));
  ASSERT(0 == uv_fs_fstat(loop, reqs + n++, 0, fs_cb));
  ASSERT(0 == uv_fs_fsync(loop, reqs + n++, 0, fs_cb));
  ASSERT(0 == uv_fs_ftruncate(loop, reqs + n++, 0, 0, fs_cb));
  ASSERT(0 == uv_fs_futime(loop, reqs + n++, 0, 0, 0, fs_cb));
  ASSERT(0 == uv_fs_link(loop, reqs + n++, "/", "/", fs_cb));
  ASSERT(0 == uv_fs_lstat(loop, reqs + n++, "/", fs_cb));
  ASSERT(0 == uv_fs_mkdir(loop, reqs + n++, "/", 0, fs_cb));
  ASSERT(0 == uv_fs_open(loop, reqs + n++, "/", 0, 0, fs_cb));
  ASSERT(0 == uv_fs_read(loop, reqs + n++, 0, NULL, 0, 0, fs_cb));
  ASSERT(0 == uv_fs_readdir(loop, reqs + n++, "/", 0, fs_cb));
  ASSERT(0 == uv_fs_readlink(loop, reqs + n++, "/", fs_cb));
  ASSERT(0 == uv_fs_rename(loop, reqs + n++, "/", "/", fs_cb));
  /* NOTE(review): uv_fs_mkdir appears twice (see 12 lines above); this
   * second call was probably meant to be uv_fs_rmdir — confirm upstream. */
  ASSERT(0 == uv_fs_mkdir(loop, reqs + n++, "/", 0, fs_cb));
  ASSERT(0 == uv_fs_sendfile(loop, reqs + n++, 0, 0, 0, 0, fs_cb));
  ASSERT(0 == uv_fs_stat(loop, reqs + n++, "/", fs_cb));
  ASSERT(0 == uv_fs_symlink(loop, reqs + n++, "/", "/", 0, fs_cb));
  ASSERT(0 == uv_fs_unlink(loop, reqs + n++, "/", fs_cb));
  ASSERT(0 == uv_fs_utime(loop, reqs + n++, "/", 0, 0, fs_cb));
  ASSERT(0 == uv_fs_write(loop, reqs + n++, 0, NULL, 0, 0, fs_cb));
  ASSERT(n == ARRAY_SIZE(reqs));

  ASSERT(0 == uv_timer_init(loop, &ci.timer_handle));
  ASSERT(0 == uv_timer_start(&ci.timer_handle, timer_cb, 10, 0));
  ASSERT(0 == uv_run(loop));
  ASSERT(n == fs_cb_called);
  ASSERT(1 == timer_cb_called);

  cleanup_threadpool();

  return 0;
}
@@ -35,7 +35,8 @@ static void work_cb(uv_work_t* req) {
}


static void after_work_cb(uv_work_t* req) {
static void after_work_cb(uv_work_t* req, int status) {
ASSERT(status == 0);
ASSERT(req == &work_req);
ASSERT(req->data == &data);
after_work_cb_count++;
@@ -56,3 +57,21 @@ TEST_IMPL(threadpool_queue_work_simple) {
MAKE_VALGRIND_HAPPY();
return 0;
}


TEST_IMPL(threadpool_queue_work_einval) {
int r;

work_req.data = &data;
r = uv_queue_work(uv_default_loop(), &work_req, NULL, after_work_cb);
ASSERT(r == -1);

uv_run(uv_default_loop());
ASSERT(uv_last_error(uv_default_loop()).code == UV_EINVAL);

ASSERT(work_cb_count == 0);
ASSERT(after_work_cb_count == 0);

MAKE_VALGRIND_HAPPY();
return 0;
}
@@ -250,6 +250,7 @@
'test/test-cwd-and-chdir.c',
'test/test-delayed-accept.c',
'test/test-error.c',
'test/test-embed.c',
'test/test-fail-always.c',
'test/test-fs.c',
'test/test-fs-event.c',
@@ -298,7 +299,9 @@
'test/test-tcp-write-to-half-open-connection.c',
'test/test-tcp-writealot.c',
'test/test-tcp-unexpected-read.c',
'test/test-tcp-read-stop.c',
'test/test-threadpool.c',
'test/test-threadpool-cancel.c',
'test/test-mutexes.c',
'test/test-thread.c',
'test/test-barrier.c',
@@ -3728,9 +3728,9 @@ void EIO_PBKDF2After(pbkdf2_req* req, Local<Value> argv[2]) {
}


void EIO_PBKDF2After(uv_work_t* work_req) {
void EIO_PBKDF2After(uv_work_t* work_req, int status) {
assert(status == 0);
pbkdf2_req* req = container_of(work_req, pbkdf2_req, work_req);

HandleScope scope;
Local<Value> argv[2];
Persistent<Object> obj = req->obj;
@@ -3902,16 +3902,15 @@ void RandomBytesCheck(RandomBytesRequest* req, Local<Value> argv[2]) {
}


void RandomBytesAfter(uv_work_t* work_req) {
void RandomBytesAfter(uv_work_t* work_req, int status) {
assert(status == 0);
RandomBytesRequest* req = container_of(work_req,
RandomBytesRequest,
work_req_);

HandleScope scope;
Local<Value> argv[2];
RandomBytesCheck(req, argv);
MakeCallback(req->obj_, "ondone", ARRAY_SIZE(argv), argv);

delete req;
}

@@ -213,7 +213,9 @@ class ZCtx : public ObjectWrap {
}

// v8 land!
static void After(uv_work_t* work_req) {
static void After(uv_work_t* work_req, int status) {
assert(status == 0);

HandleScope scope;
ZCtx *ctx = container_of(work_req, ZCtx, work_req_);