Showing with 52 additions and 17 deletions.
  1. +1 −1 src/unix/aix.c
  2. +1 −1 src/unix/core.c
  3. +1 −1 src/unix/darwin.c
  4. +1 −1 src/unix/freebsd.c
  5. +9 −2 src/unix/internal.h
  6. +34 −6 src/unix/linux-core.c
  7. +1 −1 src/unix/loop.c
  8. +1 −1 src/unix/netbsd.c
  9. +1 −1 src/unix/openbsd.c
  10. +1 −1 src/unix/sunos.c
  11. +1 −1 src/unix/thread.c
@@ -45,7 +45,7 @@
#include <sys/proc.h>
#include <sys/procfs.h>

uint64_t uv__hrtime(void) {
uint64_t uv__hrtime(uv_clocktype_t type) {
uint64_t G = 1000000000;
timebasestruct_t t;
read_wall_time(&t, TIMEBASE_SZ);
@@ -73,7 +73,7 @@ STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));


uint64_t uv_hrtime(void) {
return uv__hrtime();
return uv__hrtime(UV_CLOCK_PRECISE);
}


@@ -52,7 +52,7 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
}


uint64_t uv__hrtime(void) {
uint64_t uv__hrtime(uv_clocktype_t type) {
mach_timebase_info_data_t info;

if (mach_timebase_info(&info) != KERN_SUCCESS)
@@ -67,7 +67,7 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
}


uint64_t uv__hrtime(void) {
uint64_t uv__hrtime(uv_clocktype_t type) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
@@ -128,6 +128,11 @@ enum {
UV_TCP_SINGLE_ACCEPT = 0x1000 /* Only accept() when idle. */
};

/* Clock source selector for uv__hrtime(): trade resolution for speed. */
typedef enum {
UV_CLOCK_PRECISE = 0, /* Use the highest resolution clock available. */
UV_CLOCK_FAST = 1 /* Use the fastest clock with <= 1ms granularity. */
} uv_clocktype_t;

/* core */
int uv__nonblock(int fd, int set);
int uv__close(int fd);
@@ -191,7 +196,7 @@ void uv__work_submit(uv_loop_t* loop,
void uv__work_done(uv_async_t* handle, int status);

/* platform specific */
uint64_t uv__hrtime(void);
uint64_t uv__hrtime(uv_clocktype_t type);
int uv__kqueue_init(uv_loop_t* loop);
int uv__platform_loop_init(uv_loop_t* loop, int default_loop);
void uv__platform_loop_delete(uv_loop_t* loop);
@@ -263,7 +268,9 @@ UV_UNUSED(static void uv__req_init(uv_loop_t* loop,
uv__req_init((loop), (uv_req_t*)(req), (type))

UV_UNUSED(static void uv__update_time(uv_loop_t* loop)) {
loop->time = uv__hrtime() / 1000000;
/* Use a fast time source if available. We only need millisecond precision.
*/
loop->time = uv__hrtime(UV_CLOCK_FAST) / 1000000;
}

UV_UNUSED(static char* uv__basename_r(const char* path)) {
@@ -52,8 +52,10 @@
# include <linux/if_packet.h>
#endif

#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
/* Available from 2.6.32 onwards. */
#ifndef CLOCK_MONOTONIC_COARSE
# define CLOCK_MONOTONIC_COARSE 6
#endif

/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
* include that file because it conflicts with <time.h>. We'll just have to
@@ -245,10 +247,36 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}


uint64_t uv__hrtime(void) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
/* Return the current time in nanoseconds from a monotonic clock.
 *
 * type selects the time source: UV_CLOCK_PRECISE always uses
 * CLOCK_MONOTONIC; UV_CLOCK_FAST prefers CLOCK_MONOTONIC_COARSE when its
 * resolution is 1ms or better, because the coarse clock is serviced
 * entirely from the vDSO whereas CLOCK_MONOTONIC may make a system call.
 *
 * Returns 0 if clock_gettime() fails, which should not happen for a
 * valid monotonic clock id.
 */
uint64_t uv__hrtime(uv_clocktype_t type) {
  /* POSIX clock ids are clockid_t, not clock_t (which is for clock()). */
  static clockid_t fast_clock_id = -1;
  struct timespec t;
  clockid_t clock_id;

  /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
   * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
   * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
   * decide to make a costly system call.
   */
  /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
   * when it has microsecond granularity or better (unlikely).
   */
  /* NOTE(review): concurrent first calls may race on fast_clock_id, but
   * every thread computes the same value, so the race is benign.
   */
  if (type == UV_CLOCK_FAST && fast_clock_id == -1) {
    if (clock_getres(CLOCK_MONOTONIC_COARSE, &t) == 0 &&
        t.tv_nsec <= 1 * 1000 * 1000) {
      fast_clock_id = CLOCK_MONOTONIC_COARSE;
    } else {
      fast_clock_id = CLOCK_MONOTONIC;
    }
  }

  clock_id = CLOCK_MONOTONIC;
  if (type == UV_CLOCK_FAST)
    clock_id = fast_clock_id;

  if (clock_gettime(clock_id, &t))
    return 0;  /* Not really possible. */

  return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}


@@ -96,7 +96,7 @@ static int uv__loop_init(uv_loop_t* loop, int default_loop) {
QUEUE_INIT(&loop->watcher_queue);

loop->closing_handles = NULL;
loop->time = uv__hrtime() / 1000000;
uv__update_time(loop);
uv__async_init(&loop->async_watcher);
loop->signal_pipefd[0] = -1;
loop->signal_pipefd[1] = -1;
@@ -57,7 +57,7 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
}


uint64_t uv__hrtime(void) {
uint64_t uv__hrtime(uv_clocktype_t type) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
@@ -56,7 +56,7 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
}


uint64_t uv__hrtime(void) {
uint64_t uv__hrtime(uv_clocktype_t type) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
@@ -239,7 +239,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}


uint64_t uv__hrtime(void) {
uint64_t uv__hrtime(uv_clocktype_t type) {
return gethrtime();
}

@@ -335,7 +335,7 @@ int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
ts.tv_nsec = timeout % NANOSEC;
r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
timeout += uv__hrtime();
timeout += uv__hrtime(UV_CLOCK_PRECISE);
ts.tv_sec = timeout / NANOSEC;
ts.tv_nsec = timeout % NANOSEC;
#if defined(__ANDROID__)