deps: upgrade libuv to 6f77e45

commit f19fca903659e723afb9411057ac04b1ce9edf23 (1 parent: 5ff2ae8), committed by @bnoordhuis on May 11, 2012
Showing with 79 additions and 32 deletions.
  1. +7 −1 deps/uv/src/unix/core.c
  2. +0 −1 deps/uv/src/unix/ev/ev.c
  3. +1 −0 deps/uv/src/unix/internal.h
  4. +56 −30 deps/uv/src/unix/stream.c
  5. +14 −0 deps/uv/src/unix/tcp.c
  6. +1 −0 deps/uv/test/runner.c
8 deps/uv/src/unix/core.c
@@ -83,6 +83,12 @@ void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
uv_read_stop(stream);
ev_io_stop(stream->loop->ev, &stream->write_watcher);
+ /* The next_watcher has been repurposed for listen sockets, reset it.
+ * See uv__server_io() in src/unix/stream.c for the motivation.
+ */
+ ev_idle_stop(handle->loop->ev, &handle->next_watcher);
+ ev_set_cb(&handle->next_watcher, uv__next);
+
uv__close(stream->fd);
stream->fd = -1;
@@ -216,7 +222,7 @@ void uv__handle_init(uv_loop_t* loop, uv_handle_t* handle,
handle->type = type;
handle->flags = 0;
- ev_init(&handle->next_watcher, uv__next);
+ ev_idle_init(&handle->next_watcher, uv__next);
handle->next_watcher.data = handle;
/* Ref the loop until this handle is closed. See uv__finish_close. */
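The core.c hunk above turns next_watcher into an ev_idle watcher and has uv_close() restore uv__next as its callback. The key libev mechanism is ev_set_cb(), which lets one watcher be retargeted at runtime; a minimal standalone sketch of that pattern (illustration only, not part of this commit, with made-up callback names):

/* Retargeting a single ev_idle watcher with ev_set_cb(), the pattern
 * that uv_close() unwinds above.  Illustration only. */
#include <ev.h>
#include <stdio.h>

static ev_idle watcher;

static void second_cb(EV_P_ ev_idle* w, int revents) {
  printf("second callback\n");
  ev_idle_stop(EV_A_ w);            /* no active watchers left; loop exits */
}

static void first_cb(EV_P_ ev_idle* w, int revents) {
  printf("first callback\n");
  ev_set_cb(w, second_cb);          /* repurpose the same watcher */
}

int main(void) {
  struct ev_loop* loop = EV_DEFAULT;
  ev_idle_init(&watcher, first_cb);
  ev_idle_start(loop, &watcher);
  ev_run(loop, 0);
  return 0;
}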
1 deps/uv/src/unix/ev/ev.c
@@ -2554,7 +2554,6 @@ void
ev_unref (EV_P)
{
--activecnt;
- if (activecnt < 0) abort();
}
void
1 deps/uv/src/unix/internal.h
@@ -152,6 +152,7 @@ enum {
UV_WRITABLE = 0x40, /* The stream is writable */
UV_TCP_NODELAY = 0x080, /* Disable Nagle. */
UV_TCP_KEEPALIVE = 0x100, /* Turn on keep-alive. */
+ UV_TCP_SINGLE_ACCEPT = 0x200, /* Only accept() when idle. */
UV_TIMER_ACTIVE = 0x080,
UV_TIMER_REPEAT = 0x100
};
86 deps/uv/src/unix/stream.c
@@ -152,53 +152,79 @@ void uv__stream_destroy(uv_stream_t* stream) {
}
+static void uv__next_accept(EV_P_ ev_idle *watcher, int revents) {
+ uv_stream_t* stream = container_of(watcher, uv_stream_t, next_watcher);
+
+ ev_idle_stop(EV_A_ &stream->next_watcher);
+
+ if (stream->accepted_fd == -1)
+ ev_io_start(EV_A_ &stream->read_watcher);
+}
+
+
void uv__server_io(EV_P_ ev_io* watcher, int revents) {
int fd;
struct sockaddr_storage addr;
uv_stream_t* stream = watcher->data;
- assert(watcher == &stream->read_watcher ||
- watcher == &stream->write_watcher);
+ assert(watcher == &stream->read_watcher || watcher == &stream->write_watcher);
assert(revents == EV_READ);
-
assert(!(stream->flags & UV_CLOSING));
+ assert(stream->accepted_fd == -1);
- if (stream->accepted_fd >= 0) {
- ev_io_stop(EV_A, &stream->read_watcher);
- return;
- }
-
- /* connection_cb can close the server socket while we're
- * in the loop so check it on each iteration.
- */
while (stream->fd != -1) {
- assert(stream->accepted_fd < 0);
fd = uv__accept(stream->fd, (struct sockaddr*)&addr, sizeof addr);
- if (fd < 0) {
- if (errno == EAGAIN || errno == EWOULDBLOCK) {
- /* No problem. */
+ if (fd == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
return;
- } else if (errno == EMFILE) {
- /* TODO special trick. unlock reserved socket, accept, close. */
+ if (errno == EMFILE)
return;
- } else if (errno == ECONNABORTED) {
- /* ignore */
+ if (errno == ECONNABORTED)
continue;
- } else {
- uv__set_sys_error(stream->loop, errno);
- stream->connection_cb((uv_stream_t*)stream, -1);
- }
- } else {
+
+ uv__set_sys_error(stream->loop, errno);
+ stream->connection_cb(stream, -1);
+ }
+ else {
stream->accepted_fd = fd;
- stream->connection_cb((uv_stream_t*)stream, 0);
- if (stream->accepted_fd >= 0) {
- /* The user hasn't yet accepted called uv_accept() */
- ev_io_stop(stream->loop->ev, &stream->read_watcher);
- return;
- }
+ stream->connection_cb(stream, 0);
+ }
+
+ if (stream->accepted_fd != -1 || stream->flags & UV_TCP_SINGLE_ACCEPT) {
+ ev_io_stop(EV_A_ &stream->read_watcher);
+ break;
}
}
+
+ if (stream->flags & UV_TCP_SINGLE_ACCEPT &&
+ stream->accepted_fd == -1 &&
+ stream->fd != -1)
+ {
+ /* We piggyback on the next_watcher. This is a hack to guarantee fair
+ * connection load balancing in multi-process setups. The problem is
+ * as follows:
+ *
+ * 1. Multiple processes listen on the same socket.
+ * 2. The OS scheduler commonly gives preference to one process to
+ * avoid task switches.
+ * 3. That process therefore accepts most of the new connections,
+ * leading to a (sometimes very) unevenly distributed load.
+ *
+ * Here is how we mitigate this issue:
+ *
+ * 1. Accept a connection.
+ * 2. Start an idle watcher.
+ * 3. Don't accept new connections until the idle callback fires.
+ *
+ * This works because the callback only fires when there have been
+ * no recent events, i.e. none of the watched file descriptors have
+ * recently been readable or writable.
+ */
+ assert(!ev_is_active(&stream->next_watcher));
+ ev_set_cb(&stream->next_watcher, uv__next_accept);
+ ev_idle_start(EV_A_ &stream->next_watcher);
+ }
}
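The block above is the heart of the change. In isolation, the accept-pacing pattern can be sketched with plain libev; the following self-contained example (illustration only, not part of this commit; socket setup and error handling are kept to a minimum) gates the listen socket's read watcher behind an idle watcher so that at most one connection is accepted per loop iteration:

/* Single-accept pattern in plain libev.  Illustration only. */
#include <ev.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static ev_io accept_watcher;
static ev_idle resume_watcher;

static void on_idle(EV_P_ ev_idle* w, int revents) {
  /* No pending I/O events; it is now fair to take the next connection. */
  ev_idle_stop(EV_A_ w);
  ev_io_start(EV_A_ &accept_watcher);
}

static void on_connection(EV_P_ ev_io* w, int revents) {
  int fd = accept(w->fd, NULL, NULL);
  if (fd == -1)
    return;  /* EAGAIN, ECONNABORTED, ... - ignored in this sketch */

  close(fd);  /* a real server would hand the fd to the application */

  /* Accept at most one connection per loop iteration: stop reading and
   * resume from the idle callback, so sibling processes sharing the
   * listen socket get a shot at the remaining connections. */
  ev_io_stop(EV_A_ w);
  ev_idle_start(EV_A_ &resume_watcher);
}

int main(void) {
  struct ev_loop* loop = EV_DEFAULT;
  struct sockaddr_in addr;
  int listen_fd = socket(AF_INET, SOCK_STREAM, 0);

  memset(&addr, 0, sizeof addr);
  addr.sin_family = AF_INET;
  addr.sin_port = htons(8000);
  bind(listen_fd, (struct sockaddr*)&addr, sizeof addr);
  listen(listen_fd, 128);
  fcntl(listen_fd, F_SETFL, O_NONBLOCK);

  ev_io_init(&accept_watcher, on_connection, listen_fd, EV_READ);
  ev_idle_init(&resume_watcher, on_idle);
  ev_io_start(loop, &accept_watcher);

  ev_run(loop, 0);
  return 0;
}

Because the idle callback only fires once the loop has run out of pending I/O, a process that just accepted a connection steps aside for one iteration, which is what gives sibling processes listening on the same socket a chance to win the next accept().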
14 deps/uv/src/unix/tcp.c
@@ -22,6 +22,7 @@
#include "uv.h"
#include "internal.h"
+#include <stdlib.h>
#include <assert.h>
#include <errno.h>
@@ -167,8 +168,17 @@ int uv_tcp_getpeername(uv_tcp_t* handle, struct sockaddr* name,
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
+ static int single_accept = -1;
int r;
+ if (single_accept == -1) {
+ const char *val = getenv("UV_TCP_SINGLE_ACCEPT");
+ single_accept = val && !!atoi(val);
+ }
+
+ if (single_accept)
+ tcp->flags |= UV_TCP_SINGLE_ACCEPT;
+
if (tcp->delayed_error) {
uv__set_sys_error(tcp->loop, tcp->delayed_error);
return -1;
@@ -322,5 +332,9 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
+  if (enable)
+    handle->flags &= ~UV_TCP_SINGLE_ACCEPT;
+  else
+    handle->flags |= UV_TCP_SINGLE_ACCEPT;
return 0;
}
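For embedders, the behavior added above can be toggled per handle with uv_tcp_simultaneous_accepts(), or process-wide by exporting UV_TCP_SINGLE_ACCEPT with a non-zero value before the process starts (see the uv_tcp_listen() hunk). A usage sketch, not part of this commit and written against present-day libuv signatures for uv_ip4_addr(), uv_tcp_bind() and uv_run(), which differ slightly from the 2012-era API shown in this diff:

#include <stdlib.h>
#include <uv.h>

static void on_close(uv_handle_t* handle) {
  free(handle);
}

static void on_connection(uv_stream_t* server, int status) {
  uv_tcp_t* client;

  if (status != 0)
    return;

  client = malloc(sizeof *client);
  uv_tcp_init(server->loop, client);
  uv_accept(server, (uv_stream_t*)client);   /* take the single pending fd */
  uv_close((uv_handle_t*)client, on_close);  /* a real server would read/write first */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_tcp_t server;
  struct sockaddr_in addr;

  uv_tcp_init(loop, &server);
  uv_ip4_addr("0.0.0.0", 7000, &addr);
  uv_tcp_bind(&server, (const struct sockaddr*)&addr, 0);

  /* Disable simultaneous accepts: this sets UV_TCP_SINGLE_ACCEPT, so the
   * loop accepts at most one connection per iteration, which balances
   * load across worker processes sharing this listen socket. */
  uv_tcp_simultaneous_accepts(&server, 0);

  uv_listen((uv_stream_t*)&server, 128, on_connection);
  return uv_run(loop, UV_RUN_DEFAULT);
}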
1 deps/uv/test/runner.c
@@ -86,6 +86,7 @@ int run_test(const char* test, int timeout, int benchmark_output) {
int i;
status = 255;
+ main_proc = NULL;
process_count = 0;
/* If it's a helper the user asks for, start it directly. */
