diff --git a/deps/uv/.mailmap b/deps/uv/.mailmap index bf12432495de03..97f5d1f2c004c9 100644 --- a/deps/uv/.mailmap +++ b/deps/uv/.mailmap @@ -4,6 +4,7 @@ Aaron Bieber Alan Gutierrez Andrius Bentkus Andy Fiddaman +Andy Pan Bert Belder Bert Belder Bert Belder @@ -18,6 +19,7 @@ David Carlier Devchandra Meetei Leishangthem Fedor Indutny Frank Denis +Hüseyin Açacak <110401522+huseyinacacak-janea@users.noreply.github.com> Imran Iqbal Isaac Z. Schlueter Jason Williams @@ -37,6 +39,7 @@ Michael Neumann Michael Penick Nicholas Vavilov Nick Logan +Olivier Valentin Rasmus Christian Pedersen Rasmus Christian Pedersen Richard Lau @@ -47,8 +50,8 @@ Sakthipriyan Vairamani Sam Roberts San-Tai Hsu Santiago Gimeno -Saúl Ibarra Corretgé -Saúl Ibarra Corretgé +Saúl Ibarra Corretgé +Saúl Ibarra Corretgé Shigeki Ohtsu Shuowang (Wayne) Zhang TK-one diff --git a/deps/uv/AUTHORS b/deps/uv/AUTHORS index f3942ced3c5703..807440b30e8488 100644 --- a/deps/uv/AUTHORS +++ b/deps/uv/AUTHORS @@ -567,3 +567,24 @@ Ardi Nugraha <33378542+ardi-nugraha@users.noreply.github.com> Anton Bachin Trevor Flynn Andy Pan +Viacheslav Muravyev +Anthony Alayo +Thomas Walter <31201229+waltoss@users.noreply.github.com> +hiiizxf <385122613@qq.com> +Geddy +Farzin Monsef +tgolang <154592711+tgolang@users.noreply.github.com> +josedelinux +Hüseyin Açacak <110401522+huseyinacacak-janea@users.noreply.github.com> +Uilian Ries +Olivier Valentin +郑苏波 (Super Zheng) +zeertzjq +Ian Butterworth +握猫猫 <164346864@qq.com> +Zuohui Yang <274048862@qq.com> +Edigleysson Silva (Edy) +Raihaan Shouhell +Rialbat +Adam +Poul T Lomholt diff --git a/deps/uv/CMakeLists.txt b/deps/uv/CMakeLists.txt index 5e8e0166d743bc..28c6df25666967 100644 --- a/deps/uv/CMakeLists.txt +++ b/deps/uv/CMakeLists.txt @@ -81,15 +81,20 @@ if(TSAN) endif() if(UBSAN) + cmake_minimum_required(VERSION 3.13) list(APPEND uv_defines __UBSAN__=1) if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined") - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined") - elseif(MSVC) - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /fsanitize=undefined") + add_compile_options("-fsanitize=undefined" "-fno-sanitize-recover=undefined") + if (NOT WIN32) + add_link_options("-fsanitize=undefined") + endif() + if(MSVC) + add_compile_options("/Oy-") + else() + add_compile_options("-fno-omit-frame-pointer") + endif() else() - message(SEND_ERROR "UndefinedBehaviorSanitizer support requires clang, gcc, or msvc. Try again with -DCMAKE_C_COMPILER.") + message(SEND_ERROR "UndefinedBehaviorSanitizer support requires clang or gcc. 
Try again with -DCMAKE_C_COMPILER.") endif() endif() @@ -307,6 +312,7 @@ if(APPLE) endif() if(CMAKE_SYSTEM_NAME STREQUAL "GNU") + list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112 _XOPEN_SOURCE=500) list(APPEND uv_libraries dl) list(APPEND uv_sources src/unix/bsd-ifaddrs.c @@ -566,6 +572,7 @@ if(LIBUV_BUILD_TESTS) test/test-hrtime.c test/test-idle.c test/test-idna.c + test/test-iouring-pollhup.c test/test-ip4-addr.c test/test-ip6-addr.c test/test-ip-name.c @@ -643,6 +650,7 @@ if(LIBUV_BUILD_TESTS) test/test-tcp-oob.c test/test-tcp-open.c test/test-tcp-read-stop.c + test/test-tcp-reuseport.c test/test-tcp-read-stop-start.c test/test-tcp-rst.c test/test-tcp-shutdown-after-write.c @@ -691,6 +699,7 @@ if(LIBUV_BUILD_TESTS) test/test-udp-send-unreachable.c test/test-udp-try-send.c test/test-udp-recv-in-a-row.c + test/test-udp-reuseport.c test/test-uname.c test/test-walk-handles.c test/test-watcher-cross-stop.c) @@ -787,6 +796,14 @@ if(MSVC) endif() endif() +if(BUILD_SHARED_LIBS) + set(LIB_SELECTED uv) +else() + set(LIB_SELECTED uv_a) +endif() + +add_library(libuv::libuv ALIAS ${LIB_SELECTED}) + message(STATUS "summary of build options: Install prefix: ${CMAKE_INSTALL_PREFIX} Target system: ${CMAKE_SYSTEM_NAME} diff --git a/deps/uv/ChangeLog b/deps/uv/ChangeLog index 05c1cb7e748b43..e1d1aa32989124 100644 --- a/deps/uv/ChangeLog +++ b/deps/uv/ChangeLog @@ -1,4 +1,237 @@ -2024.02.07, Version 1.48.0 (Stable) +2024.10.11, Version 1.49.1 (Stable) + +Changes since version 1.49.0: + +* build: add darwin-syscalls.h to release tarball (Ben Noordhuis) + +* linux: use IORING_SETUP_NO_SQARRAY when available (Ben Noordhuis) + +* linux: use IORING_OP_FTRUNCATE when available (Ben Noordhuis) + +* win: fix pNtQueryDirectoryFile check (Rialbat) + +* win: fix WriteFile() error translation (Santiago Gimeno) + +* win,fs: uv_fs_rmdir() to return ENOENT on file (Santiago Gimeno) + +* win,pipe: ipc code does not support async read (Jameson Nash) + +* netbsd: fix build (Adam) + +* win,fs: fix bug in fs__readdir (Hüseyin Açacak) + +* unix: workaround gcc bug on armv7 (Santiago Gimeno) + +* unix: work around arm-linux-gnueabihf-gcc bug (Ben Noordhuis) + +* unix: fix uv_tcp_keepalive in smartOS (Santiago Gimeno) + +* unix: fix uv_getrusage ru_maxrss on solaris (Poul T Lomholt) + + +2024.09.25, Version 1.49.0 (Stable), d2e56a5e8d3e39947b78405ca6e4727c70f5568a + +Changes since version 1.48.0: + +* test: fix -Wpointer-to-int-cast on 32 bits systems (Ben Noordhuis) + +* build: add alias for libuv to CMakeLists.txt (Anthony Alayo) + +* linux: create io_uring sqpoll ring lazily (Ben Noordhuis) + +* misc: run sample CI when code changes (Jameson Nash) + +* linux: fix uv_available_parallelism using cgroup (Thomas Walter) + +* doc: fix tty example segfault (hiiizxf) + +* udp,unix: fix sendmsg use-after-free (Geddy) + +* cygwin: implement uv_resident_set_memory (Farzin Monsef) + +* win: almost fix race detecting ESRCH in uv_kill (Santiago Gimeno) + +* test: disable env var test under win32+asan (Ben Noordhuis) + +* unix,fs: fix realpath calls that use the system allocator (Saúl Ibarra + Corretgé) + +* sunos: sync tcp keep-alive with other unices (Andy Pan) + +* linux: fix /proc/self/stat executable name parsing (Farzin Monsef) + +* test,ci: fix [AM]San, disable ASLR (Ben Noordhuis) + +* win: remove _alloca usage (Ben Noordhuis) + +* unix: reinstate preadv/pwritev fallback code (Ben Noordhuis) + +* linux: don't delay EPOLL_CTL_DEL operations (Ben Noordhuis) + +* doc: fix typos in ChangeLog (tgolang) + +* unix,win: error on 
zero delay tcp keepalive (Saúl Ibarra Corretgé) + +* win: simplify uv_once implementation (Saúl Ibarra Corretgé) + +* doc: correct udp socket options documentation (Ben Noordhuis) + +* linux: don't use sendmmsg() for single datagrams (Ben Noordhuis) + +* unix: fix fd leaks in SCM_RIGHTS error path (Ben Noordhuis) + +* win: robustify uv_os_getenv() error checking (Ben Noordhuis) + +* test: use newer ASSERT_MEM_EQ macro (Ben Noordhuis) + +* unix: de-duplicate conditions for using kqueue (Brad King) + +* darwin: simplify uv_hrtime (Saúl Ibarra Corretgé) + +* mailmap: update saghul's main email address (Saúl Ibarra Corretgé) + +* win: remove no longer needed define (Saúl Ibarra Corretgé) + +* doc: fix some typos (josedelinux) + +* linux,darwin: make `uv_fs_copyfile` behaves like `cp -r` (Juan José Arboleda) + +* dragonfly: disable SO_REUSEPORT for UDP socket bindings (Andy Pan) + +* test: remove the obsolete HAVE_KQUEUE macro (Andy Pan) + +* unix: use the presence of SOCK_* instead of OS macros for socketpair (Andy + Pan) + +* bsd: support pipe2() on *BSD (Andy Pan) + +* unix: support SO_REUSEPORT with load balancing for TCP (Andy Pan) + +* doc: add entries for extended getpw (Juan José Arboleda) + +* test: fix the flaky test-tcp-reuseport (Andy Pan) + +* aix,ibmi: fix compilation errors in fs_copyfile (Jeffrey H. Johnson) + +* unix: support SO_REUSEPORT with load balancing for UDP (Andy Pan) + +* tcpkeepalive: distinguish OS versions and use proper time units (Andy Pan) + +* win: map ERROR_BAD_EXE_FORMAT to UV_EFTYPE (Hüseyin Açacak) + +* doc: add instruction how to install with Conan (Uilian Ries) + +* unix,win: remove unused req parameter from macros (Viacheslav Muravyev) + +* build: fix android ci build (Ben Noordhuis) + +* unix,win: export wtf8 functions properly (Ben Noordhuis) + +* hurd: add includes and macro prerequisites (Olivier Valentin) + +* hurd: stub uv_thread_setpriority() (Olivier Valentin) + +* ci: use macOS 12 for macOS and iOS builds (Saúl Ibarra Corretgé) + +* darwin: fix crash on iOS(arm64) (郑苏波 (Super Zheng)) + +* Create dependabot.yml for updating github-actions (Jameson Nash) + +* doc: correct names of Win32 APIs in fs.rst (zeertzjq) + +* ci: bump upload and download-artifact versions (dependabot[bot]) + +* ci: bump actions/setup-python from 4 to 5 (dependabot[bot]) + +* ci: bump KyleMayes/install-llvm-action from 1 to 2 (dependabot[bot]) + +* win,error: remap ERROR_NO_DATA to EAGAIN (Jameson Nash) + +* test: handle zero-length udp datagram (Ben Noordhuis) + +* misc: remove splay trees macros (Viacheslav Muravyev) + +* test,openbsd: remove superfluous ifdef guard (Ben Noordhuis) + +* win,fs: use posix delete semantics, if supported (Ian Butterworth) + +* win: fix env var in uv_os_homedir and uv_os_tmpdir (Hüseyin Açacak) + +* fsevents: detect watched directory removal (Santiago Gimeno) + +* ci: bump actions/checkout to 4 (dependabot[bot]) + +* linux: eliminate a read on eventfd per wakeup (Andy Pan) + +* test: pipe_overlong_path handle ENAMETOOLONG (Abdirahim Musse) + +* win,fs: use the new Windows fast stat API (Hüseyin Açacak) + +* win,pipe: fix race with concurrent readers (Jameson Nash) + +* win,signal: fix data race dispatching SIGWINCH (Jameson Nash) + +* build: ubsan fixes (Matheus Izvekov) + +* linux: disable SQPOLL io_uring by default (Santiago Gimeno) + +* win: fix fs.c ubsan failure (Matheus Izvekov) + +* test: rmdir can return `EEXIST` or `ENOTEMPTY` (Richard Lau) + +* test: check for `UV_CHANGE` or `UV_RENAME` event (Richard Lau) + +* unix,fs: silence 
-Wunused-result warning (Santiago Gimeno) + +* linux: support abstract unix socket autobinding (Ben Noordhuis) + +* kqueue: use EVFILT_USER for async if available (Andy Pan) + +* win: remove deprecated GetVersionExW call (Shelley Vohr) + +* doc: document uv_loop_option (握猫猫) + +* doc: fix the `uv_*_set_data` series of functions (握猫猫) + +* doc: properly label enumerations and types (握猫猫) + +* doc: document specific macOS fs_event behavior (Santiago Gimeno) + +* win,pipe: restore fallback handling for blocking pipes (Jameson Nash) + +* unix,win: remove unused rb-tree macro parameters (Viacheslav Muravyev) + +* win: compute parallelism from process cpu affinity (Ben Noordhuis) + +* win: use NtQueryInformationProcess in uv_os_getppid (Zuohui Yang) + +* win,pipe: fix missing assignment to success (Jameson Nash) + +* win: fix uv_available_parallelism on win32 (Ben Noordhuis) + +* win,pipe: fix another missing assignment to success (Jameson Nash) + +* kqueue: disallow ill-suited file descriptor kinds (Andy Pan) + +* unix: restore tty attributes on handle close (Ben Noordhuis) + +* test: delete test with invalid assumption (Ben Noordhuis) + +* dragonflybsd: fix compilation failure (Jeffrey H. Johnson) + +* test: run android tests on ci (Edigleysson Silva (Edy)) + +* darwin: add udp mmsg support (Raihaan Shouhell) + +* unix: work around arm-linux-gnueabihf-gcc bug (Ben Noordhuis) + +* unix: expand uv_available_parallelism() to support more platforms (Ondřej + Surý) + +* doc: add known issue in armv7 (Santiago Gimeno) + + +2024.02.07, Version 1.48.0 (Stable), e9f29cb984231524e3931aa0ae2c5dae1a32884e Changes since version 1.47.0: @@ -911,7 +1144,7 @@ Changes since version 1.41.0: * zos: treat __rfim_utok as binary (Shuowang (Wayne) Zhang) -* zos: use execvpe() to set environ explictly (Shuowang (Wayne) Zhang) +* zos: use execvpe() to set environ explicitly (Shuowang (Wayne) Zhang) * zos: use custom proctitle implementation (Shuowang (Wayne) Zhang) @@ -3417,7 +3650,7 @@ Changes since version 1.9.1: * zos: implement uv__io_check_fd (John Barboza) -* unix: unneccessary use const qualifier in container_of (John Barboza) +* unix: unnecessary use const qualifier in container_of (John Barboza) * win,tty: add support for ANSI codes in win10 v1511 (Imran Iqbal) @@ -5520,7 +5753,7 @@ Changes since version 0.11.8: is an int64_t, and no longer an int. (Bert Belder) * process: make uv_spawn() return some types of errors immediately on windows, - instead of passing the error code the the exit callback. This brings it on + instead of passing the error code the exit callback. This brings it on par with libuv's behavior on unix. 
(Bert Belder) diff --git a/deps/uv/Makefile.am b/deps/uv/Makefile.am index a14228da3bf7b2..f85a41316c8a43 100644 --- a/deps/uv/Makefile.am +++ b/deps/uv/Makefile.am @@ -198,6 +198,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \ test/test-hrtime.c \ test/test-idle.c \ test/test-idna.c \ + test/test-iouring-pollhup.c \ test/test-ip4-addr.c \ test/test-ip6-addr.c \ test/test-ip-name.c \ @@ -275,6 +276,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \ test/test-tcp-flags.c \ test/test-tcp-open.c \ test/test-tcp-read-stop.c \ + test/test-tcp-reuseport.c \ test/test-tcp-read-stop-start.c \ test/test-tcp-rst.c \ test/test-tcp-shutdown-after-write.c \ @@ -324,6 +326,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \ test/test-udp-send-unreachable.c \ test/test-udp-try-send.c \ test/test-udp-recv-in-a-row.c \ + test/test-udp-reuseport.c \ test/test-uname.c \ test/test-walk-handles.c \ test/test-watcher-cross-stop.c @@ -427,6 +430,7 @@ libuv_la_CFLAGS += -D_DARWIN_UNLIMITED_SELECT=1 libuv_la_SOURCES += src/unix/bsd-ifaddrs.c \ src/unix/darwin-proctitle.c \ src/unix/darwin-stub.h \ + src/unix/darwin-syscalls.h \ src/unix/darwin.c \ src/unix/fsevents.c \ src/unix/kqueue.c \ diff --git a/deps/uv/README.md b/deps/uv/README.md index 09e9bf10b6dc31..12c3061a894c56 100644 --- a/deps/uv/README.md +++ b/deps/uv/README.md @@ -232,6 +232,18 @@ $ ./bootstrap-vcpkg.sh # for bash $ ./vcpkg install libuv ``` +### Install with Conan + +You can install pre-built binaries for libuv or build it from source using [Conan](https://conan.io/). Use the following command: + +```bash +conan install --requires="libuv/[*]" --build=missing +``` + +The libuv Conan recipe is kept up to date by Conan maintainers and community contributors. +If the version is out of date, please [create an issue or pull request](https://github.com/conan-io/conan-center-index) on the ConanCenterIndex repository. + + ### Running tests Some tests are timing sensitive. Relaxing test timeouts may be necessary diff --git a/deps/uv/configure.ac b/deps/uv/configure.ac index d4cc003e34388d..e3ee8a840c6872 100644 --- a/deps/uv/configure.ac +++ b/deps/uv/configure.ac @@ -13,7 +13,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. AC_PREREQ(2.57) -AC_INIT([libuv], [1.48.0], [https://github.com/libuv/libuv/issues]) +AC_INIT([libuv], [1.49.1], [https://github.com/libuv/libuv/issues]) AC_CONFIG_MACRO_DIR([m4]) m4_include([m4/libuv-extra-automake-flags.m4]) m4_include([m4/as_case.m4]) diff --git a/deps/uv/docs/src/fs.rst b/deps/uv/docs/src/fs.rst index 891ee74c19d912..7bc8d0cbfd165d 100644 --- a/deps/uv/docs/src/fs.rst +++ b/deps/uv/docs/src/fs.rst @@ -16,7 +16,10 @@ Starting with libuv v1.45.0, some file operations on Linux are handed off to `io_uring ` when possible. Apart from a (sometimes significant) increase in throughput there should be no change in observable behavior. Libuv reverts to using its threadpool when the necessary -kernel features are unavailable or unsuitable. +kernel features are unavailable or unsuitable. Starting with libuv v1.49.0 this +behavior was reverted and Libuv on Linux by default will be using the threadpool +again. In order to enable io_uring the :c:type:`uv_loop_t` instance must be +configured with the :c:type:`UV_LOOP_ENABLE_IO_URING_SQPOLL` option. .. note:: On Windows `uv_fs_*` functions use utf-8 encoding. @@ -129,10 +132,9 @@ Data types uint64_t f_spare[4]; } uv_statfs_t; -.. c:enum:: uv_dirent_t +.. c:enum:: uv_dirent_type_t - Cross platform (reduced) equivalent of ``struct dirent``. 
- Used in :c:func:`uv_fs_scandir_next`. + Type of dirent. :: @@ -147,6 +149,14 @@ Data types UV_DIRENT_BLOCK } uv_dirent_type_t; + +.. c:type:: uv_dirent_t + + Cross platform (reduced) equivalent of ``struct dirent``. + Used in :c:func:`uv_fs_scandir_next`. + + :: + typedef struct uv_dirent_s { const char* name; uv_dirent_type_t type; @@ -454,7 +464,7 @@ API .. c:function:: int uv_fs_realpath(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) - Equivalent to :man:`realpath(3)` on Unix. Windows uses `GetFinalPathNameByHandle `_. + Equivalent to :man:`realpath(3)` on Unix. Windows uses `GetFinalPathNameByHandleW `_. The resulting string is stored in `req->ptr`. .. warning:: @@ -653,7 +663,7 @@ File open constants .. note:: `UV_FS_O_RANDOM` is only supported on Windows via - `FILE_FLAG_RANDOM_ACCESS `_. + `FILE_FLAG_RANDOM_ACCESS `_. .. c:macro:: UV_FS_O_RDONLY @@ -670,7 +680,7 @@ File open constants .. note:: `UV_FS_O_SEQUENTIAL` is only supported on Windows via - `FILE_FLAG_SEQUENTIAL_SCAN `_. + `FILE_FLAG_SEQUENTIAL_SCAN `_. .. c:macro:: UV_FS_O_SHORT_LIVED @@ -678,7 +688,7 @@ File open constants .. note:: `UV_FS_O_SHORT_LIVED` is only supported on Windows via - `FILE_ATTRIBUTE_TEMPORARY `_. + `FILE_ATTRIBUTE_TEMPORARY `_. .. c:macro:: UV_FS_O_SYMLINK @@ -699,7 +709,7 @@ File open constants .. note:: `UV_FS_O_TEMPORARY` is only supported on Windows via - `FILE_ATTRIBUTE_TEMPORARY `_. + `FILE_ATTRIBUTE_TEMPORARY `_. .. c:macro:: UV_FS_O_TRUNC diff --git a/deps/uv/docs/src/fs_event.rst b/deps/uv/docs/src/fs_event.rst index 54a776ae6f1453..983db1a9d5608a 100644 --- a/deps/uv/docs/src/fs_event.rst +++ b/deps/uv/docs/src/fs_event.rst @@ -45,9 +45,9 @@ Data types be a relative path to a file contained in the directory, or `NULL` if the file name cannot be determined. - The `events` parameter is an ORed mask of :c:type:`uv_fs_event` elements. + The `events` parameter is an ORed mask of :c:enum:`uv_fs_event` elements. -.. c:type:: uv_fs_event +.. c:enum:: uv_fs_event Event types that :c:type:`uv_fs_event_t` handles monitor. @@ -58,7 +58,7 @@ Data types UV_CHANGE = 2 }; -.. c:type:: uv_fs_event_flags +.. c:enum:: uv_fs_event_flags Flags that can be passed to :c:func:`uv_fs_event_start` to control its behavior. @@ -109,10 +109,13 @@ API .. c:function:: int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb, const char* path, unsigned int flags) Start the handle with the given callback, which will watch the specified - `path` for changes. `flags` can be an ORed mask of :c:type:`uv_fs_event_flags`. + `path` for changes. `flags` can be an ORed mask of :c:enum:`uv_fs_event_flags`. .. note:: Currently the only supported flag is ``UV_FS_EVENT_RECURSIVE`` and only on OSX and Windows. + .. note:: On macOS, events collected by the OS immediately before calling + ``uv_fs_event_start`` might be reported to the `uv_fs_event_cb` + callback. .. c:function:: int uv_fs_event_stop(uv_fs_event_t* handle) diff --git a/deps/uv/docs/src/handle.rst b/deps/uv/docs/src/handle.rst index e91d6e8fb2d906..2b1b8eec968844 100644 --- a/deps/uv/docs/src/handle.rst +++ b/deps/uv/docs/src/handle.rst @@ -94,7 +94,7 @@ Public members .. c:member:: uv_handle_type uv_handle_t.type - The :c:type:`uv_handle_type`, indicating the type of the underlying handle. Readonly. + The :c:enum:`uv_handle_type`, indicating the type of the underlying handle. Readonly. .. c:member:: void* uv_handle_t.data @@ -248,7 +248,7 @@ just for some handle types. .. versionadded:: 1.19.0 -.. 
c:function:: void* uv_handle_set_data(uv_handle_t* handle, void* data) +.. c:function:: void uv_handle_set_data(uv_handle_t* handle, void* data) Sets `handle->data` to `data`. diff --git a/deps/uv/docs/src/loop.rst b/deps/uv/docs/src/loop.rst index 0f5ddfb3ca21b7..d1f41e1c9f4483 100644 --- a/deps/uv/docs/src/loop.rst +++ b/deps/uv/docs/src/loop.rst @@ -16,6 +16,19 @@ Data types Loop data type. +.. c:enum:: uv_loop_option + + Additional loop options. + See :c:func:`uv_loop_configure`. + + :: + + typedef enum { + UV_LOOP_BLOCK_SIGNAL = 0, + UV_METRICS_IDLE_TIME, + UV_LOOP_USE_IO_URING_SQPOLL + } uv_loop_option; + .. c:enum:: uv_run_mode Mode used to run the loop with :c:func:`uv_run`. @@ -73,8 +86,13 @@ API This option is necessary to use :c:func:`uv_metrics_idle_time`. + - UV_LOOP_ENABLE_IO_URING_SQPOLL: Enable SQPOLL io_uring instance to handle + asynchronous file system operations. + .. versionchanged:: 1.39.0 added the UV_METRICS_IDLE_TIME option. + .. versionchanged:: 1.49.0 added the UV_LOOP_ENABLE_IO_URING_SQPOLL option. + .. c:function:: int uv_loop_close(uv_loop_t* loop) Releases all internal loop resources. Call this function only when the loop @@ -238,7 +256,7 @@ API .. versionadded:: 1.19.0 -.. c:function:: void* uv_loop_set_data(uv_loop_t* loop, void* data) +.. c:function:: void uv_loop_set_data(uv_loop_t* loop, void* data) Sets `loop->data` to `data`. diff --git a/deps/uv/docs/src/misc.rst b/deps/uv/docs/src/misc.rst index 989618304d16e5..61883b7e21e527 100644 --- a/deps/uv/docs/src/misc.rst +++ b/deps/uv/docs/src/misc.rst @@ -199,6 +199,18 @@ Data types char* homedir; } uv_passwd_t; +.. c:type:: uv_group_t + + Data type for group file information. + + :: + + typedef struct uv_group_s { + char* groupname; + unsigned long gid; + char** members; + } uv_group_t; + .. c:type:: uv_utsname_t Data type for operating system name and version information. @@ -566,6 +578,35 @@ API .. versionadded:: 1.9.0 +.. c:function:: int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) + + Gets a subset of the password file entry for the provided uid. + The populated data includes the username, euid, gid, shell, + and home directory. On non-Windows systems, all data comes from + :man:`getpwuid_r(3)`. On Windows, uid and gid are set to -1 and have no + meaning, and shell is `NULL`. After successfully calling this function, the + memory allocated to `pwd` needs to be freed with + :c:func:`uv_os_free_passwd`. + + .. versionadded:: 1.45.0 + +.. c:function:: int uv_os_get_group(uv_group_t* group, uv_uid_t gid) + + Gets a subset of the group file entry for the provided uid. + The populated data includes the group name, gid, and members. On non-Windows + systems, all data comes from :man:`getgrgid_r(3)`. On Windows, uid and gid + are set to -1 and have no meaning. After successfully calling this function, + the memory allocated to `group` needs to be freed with + :c:func:`uv_os_free_group`. + + .. versionadded:: 1.45.0 + +.. c:function:: void uv_os_free_group(uv_passwd_t* pwd) + + Frees the memory previously allocated with :c:func:`uv_os_get_group`. + + .. versionadded:: 1.45.0 + .. c:function:: void uv_os_free_passwd(uv_passwd_t* pwd) Frees the `pwd` memory previously allocated with :c:func:`uv_os_get_passwd`. diff --git a/deps/uv/docs/src/poll.rst b/deps/uv/docs/src/poll.rst index f501089279d55e..b598f0737be9e8 100644 --- a/deps/uv/docs/src/poll.rst +++ b/deps/uv/docs/src/poll.rst @@ -45,7 +45,7 @@ Data types Type definition for callback passed to :c:func:`uv_poll_start`. -.. c:type:: uv_poll_event +.. 
c:enum:: uv_poll_event Poll event types diff --git a/deps/uv/docs/src/process.rst b/deps/uv/docs/src/process.rst index 8d2fdb3e47988d..f15dcf610f55ca 100644 --- a/deps/uv/docs/src/process.rst +++ b/deps/uv/docs/src/process.rst @@ -40,7 +40,7 @@ Data types will indicate the exit status and the signal that caused the process to terminate, if any. -.. c:type:: uv_process_flags +.. c:enum:: uv_process_flags Flags to be set on the flags field of :c:type:`uv_process_options_t`. @@ -190,7 +190,7 @@ Public members Command line arguments. args[0] should be the path to the program. On Windows this uses `CreateProcess` which concatenates the arguments into a string this can cause some strange errors. See the - ``UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS`` flag on :c:type:`uv_process_flags`. + ``UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS`` flag on :c:enum:`uv_process_flags`. .. c:member:: char** uv_process_options_t.env @@ -203,7 +203,7 @@ Public members .. c:member:: unsigned int uv_process_options_t.flags Various flags that control how :c:func:`uv_spawn` behaves. See - :c:type:`uv_process_flags`. + :c:enum:`uv_process_flags`. .. c:member:: int uv_process_options_t.stdio_count .. c:member:: uv_stdio_container_t* uv_process_options_t.stdio diff --git a/deps/uv/docs/src/request.rst b/deps/uv/docs/src/request.rst index a0414431b0e092..aacabe026a7525 100644 --- a/deps/uv/docs/src/request.rst +++ b/deps/uv/docs/src/request.rst @@ -21,17 +21,9 @@ Data types Union of all request types. +.. c:enum:: uv_req_type -Public members -^^^^^^^^^^^^^^ - -.. c:member:: void* uv_req_t.data - - Space for user-defined arbitrary data. libuv does not use this field. - -.. c:member:: uv_req_type uv_req_t.type - - Indicated the type of request. Readonly. + The kind of the libuv request. :: @@ -50,6 +42,18 @@ Public members } uv_req_type; +Public members +^^^^^^^^^^^^^^ + +.. c:member:: void* uv_req_t.data + + Space for user-defined arbitrary data. libuv does not use this field. + +.. c:member:: uv_req_type uv_req_t.type + + The :c:enum:`uv_req_type`, indicating the type of the request. Readonly. + + API --- @@ -95,7 +99,7 @@ API .. versionadded:: 1.19.0 -.. c:function:: void* uv_req_set_data(uv_req_t* req, void* data) +.. c:function:: void uv_req_set_data(uv_req_t* req, void* data) Sets `req->data` to `data`. diff --git a/deps/uv/docs/src/tcp.rst b/deps/uv/docs/src/tcp.rst index cccc86bbfc0335..f9b203c41997d9 100644 --- a/deps/uv/docs/src/tcp.rst +++ b/deps/uv/docs/src/tcp.rst @@ -16,6 +16,28 @@ Data types TCP handle type. +.. c:enum:: uv_tcp_flags + + Flags used in :c:func:`uv_tcp_bind`. + + :: + + enum uv_tcp_flags { + /* Used with uv_tcp_bind, when an IPv6 address is used. */ + UV_TCP_IPV6ONLY = 1, + + /* Enable SO_REUSEPORT socket option when binding the handle. + * This allows completely duplicate bindings by multiple processes + * or threads if they all set SO_REUSEPORT before binding the port. + * Incoming connections are distributed across the participating + * listener sockets. + * + * This flag is available only on Linux 3.9+, DragonFlyBSD 3.6+, + * FreeBSD 12.0+, Solaris 11.4, and AIX 7.2.5+ for now. + */ + UV_TCP_REUSEPORT = 2, + }; + Public members ^^^^^^^^^^^^^^ @@ -65,6 +87,10 @@ API at the end of this procedure, then the handle is destroyed with a ``UV_ETIMEDOUT`` error passed to the corresponding callback. + If `delay` is less than 1 then ``UV_EINVAL`` is returned. + + .. versionchanged:: 1.49.0 If `delay` is less than 1 then ``UV_EINVAL``` is returned. + .. 
c:function:: int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) Enable / disable simultaneous asynchronous accept requests that are @@ -77,16 +103,34 @@ API .. c:function:: int uv_tcp_bind(uv_tcp_t* handle, const struct sockaddr* addr, unsigned int flags) - Bind the handle to an address and port. `addr` should point to an - initialized ``struct sockaddr_in`` or ``struct sockaddr_in6``. + Bind the handle to an address and port. When the port is already taken, you can expect to see an ``UV_EADDRINUSE`` - error from :c:func:`uv_listen` or :c:func:`uv_tcp_connect`. That is, - a successful call to this function does not guarantee that the call - to :c:func:`uv_listen` or :c:func:`uv_tcp_connect` will succeed as well. + error from :c:func:`uv_listen` or :c:func:`uv_tcp_connect` unless you specify + ``UV_TCP_REUSEPORT`` in `flags` for all the binding sockets. That is, a successful + call to this function does not guarantee that the call to :c:func:`uv_listen` or + :c:func:`uv_tcp_connect` will succeed as well. + + :param handle: TCP handle. It should have been initialized with :c:func:`uv_tcp_init`. + + :param addr: Address to bind to. It should point to an initialized ``struct sockaddr_in`` + or ``struct sockaddr_in6``. + + :param flags: Flags that control the behavior of binding the socket. + ``UV_TCP_IPV6ONLY`` can be contained in `flags` to disable dual-stack + support and only use IPv6. + ``UV_TCP_REUSEPORT`` can be contained in `flags` to enable the socket option + `SO_REUSEPORT` with the capability of load balancing that distribute incoming + connections across all listening sockets in multiple processes or threads. - `flags` can contain ``UV_TCP_IPV6ONLY``, in which case dual-stack support - is disabled and only IPv6 is used. + :returns: 0 on success, or an error code < 0 on failure. + + .. versionchanged:: 1.49.0 added the ``UV_TCP_REUSEPORT`` flag. + + .. note:: + ``UV_TCP_REUSEPORT`` flag is available only on Linux 3.9+, DragonFlyBSD 3.6+, + FreeBSD 12.0+, Solaris 11.4, and AIX 7.2.5+ at the moment. On other platforms + this function will return an UV_ENOTSUP error. .. c:function:: int uv_tcp_getsockname(const uv_tcp_t* handle, struct sockaddr* name, int* namelen) diff --git a/deps/uv/docs/src/tty.rst b/deps/uv/docs/src/tty.rst index f1acfdc1372940..7a2235210bf62a 100644 --- a/deps/uv/docs/src/tty.rst +++ b/deps/uv/docs/src/tty.rst @@ -98,7 +98,7 @@ API .. c:function:: int uv_tty_set_mode(uv_tty_t* handle, uv_tty_mode_t mode) .. versionchanged:: 1.2.0: the mode is specified as a - :c:type:`uv_tty_mode_t` value. + :c:enum:`uv_tty_mode_t` value. Set the TTY using the specified terminal mode. diff --git a/deps/uv/docs/src/udp.rst b/deps/uv/docs/src/udp.rst index d7da95edd506e2..31f7f7fd71ff47 100644 --- a/deps/uv/docs/src/udp.rst +++ b/deps/uv/docs/src/udp.rst @@ -18,7 +18,7 @@ Data types UDP send request type. -.. c:type:: uv_udp_flags +.. c:enum:: uv_udp_flags Flags used in :c:func:`uv_udp_bind` and :c:type:`uv_udp_recv_cb`.. @@ -28,19 +28,21 @@ Data types /* Disables dual stack mode. */ UV_UDP_IPV6ONLY = 1, /* - * Indicates message was truncated because read buffer was too small. The - * remainder was discarded by the OS. Used in uv_udp_recv_cb. - */ + * Indicates message was truncated because read buffer was too small. The + * remainder was discarded by the OS. Used in uv_udp_recv_cb. + */ UV_UDP_PARTIAL = 2, /* - * Indicates if SO_REUSEADDR will be set when binding the handle in - * uv_udp_bind. - * This sets the SO_REUSEPORT socket flag on the BSDs and OS X. 
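
The tcp.rst hunk above documents the new ``UV_TCP_REUSEPORT`` bind flag added in 1.49.0. A minimal sketch of how a caller might use it, assuming a kernel with load-balancing SO_REUSEPORT; the port number and worker count are illustrative, and on unsupported platforms the diff says the bind fails with ``UV_ENOTSUP``:

```c
#include <stdio.h>
#include <uv.h>

static void on_connection(uv_stream_t* server, int status) {
  (void) server;
  if (status < 0)
    return;
  /* A real server would uv_accept() into a fresh uv_tcp_t here. */
}

static void worker(void* arg) {
  uv_loop_t loop;
  uv_tcp_t server;
  struct sockaddr_in addr;
  int rc;

  (void) arg;
  uv_loop_init(&loop);
  uv_tcp_init(&loop, &server);
  uv_ip4_addr("0.0.0.0", 9123, &addr);

  /* Every worker binds the same port; the kernel spreads connections. */
  rc = uv_tcp_bind(&server, (const struct sockaddr*) &addr, UV_TCP_REUSEPORT);
  if (rc == 0)
    rc = uv_listen((uv_stream_t*) &server, 128, on_connection);
  if (rc != 0) {
    fprintf(stderr, "listen failed: %s\n", uv_strerror(rc));
    return;
  }

  uv_run(&loop, UV_RUN_DEFAULT);
  uv_loop_close(&loop);
}

int main(void) {
  uv_thread_t threads[4];
  int i;

  for (i = 0; i < 4; i++)
    uv_thread_create(&threads[i], worker, NULL);
  for (i = 0; i < 4; i++)
    uv_thread_join(&threads[i]);
  return 0;
}
```
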
On other - * Unix platforms, it sets the SO_REUSEADDR flag. What that means is that - * multiple threads or processes can bind to the same address without error - * (provided they all set the flag) but only the last one to bind will receive - * any traffic, in effect "stealing" the port from the previous listener. - */ + * Indicates if SO_REUSEADDR will be set when binding the handle. + * This sets the SO_REUSEPORT socket flag on the BSDs (except for + * DragonFlyBSD), OS X, and other platforms where SO_REUSEPORTs don't + * have the capability of load balancing, as the opposite of what + * UV_UDP_REUSEPORT would do. On other Unix platforms, it sets the + * SO_REUSEADDR flag. What that means is that multiple threads or + * processes can bind to the same address without error (provided + * they all set the flag) but only the last one to bind will receive + * any traffic, in effect "stealing" the port from the previous listener. + */ UV_UDP_REUSEADDR = 4, /* * Indicates that the message was received by recvmmsg, so the buffer provided @@ -62,8 +64,20 @@ Data types */ UV_UDP_LINUX_RECVERR = 32, /* - * Indicates that recvmmsg should be used, if available. - */ + * Indicates if SO_REUSEPORT will be set when binding the handle. + * This sets the SO_REUSEPORT socket option on supported platforms. + * Unlike UV_UDP_REUSEADDR, this flag will make multiple threads or + * processes that are binding to the same address and port "share" + * the port, which means incoming datagrams are distributed across + * the receiving sockets among threads or processes. + * + * This flag is available only on Linux 3.9+, DragonFlyBSD 3.6+, + * FreeBSD 12.0+, Solaris 11.4, and AIX 7.2.5+ for now. + */ + UV_UDP_REUSEPORT = 64, + /* + * Indicates that recvmmsg should be used, if available. + */ UV_UDP_RECVMMSG = 256 }; @@ -186,11 +200,24 @@ API with the address and port to bind to. :param flags: Indicate how the socket will be bound, - ``UV_UDP_IPV6ONLY``, ``UV_UDP_REUSEADDR``, and ``UV_UDP_RECVERR`` - are supported. + ``UV_UDP_IPV6ONLY``, ``UV_UDP_REUSEADDR``, ``UV_UDP_REUSEPORT``, + and ``UV_UDP_RECVERR`` are supported. :returns: 0 on success, or an error code < 0 on failure. + .. versionchanged:: 1.49.0 added the ``UV_UDP_REUSEPORT`` flag. + + .. note:: + ``UV_UDP_REUSEPORT`` flag is available only on Linux 3.9+, DragonFlyBSD 3.6+, + FreeBSD 12.0+, Solaris 11.4, and AIX 7.2.5+ at the moment. On other platforms + this function will return an UV_ENOTSUP error. + For platforms where `SO_REUSEPORT`s have the capability of load balancing, + specifying both ``UV_UDP_REUSEADDR`` and ``UV_UDP_REUSEPORT`` in flags is allowed + and `SO_REUSEPORT` will always override the behavior of `SO_REUSEADDR`. + For platforms where `SO_REUSEPORT`s don't have the capability of load balancing, + specifying both ``UV_UDP_REUSEADDR`` and ``UV_UDP_REUSEPORT`` in flags will fail, + returning an UV_ENOTSUP error. + .. c:function:: int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) Associate the UDP handle to a remote address and port, so every @@ -285,7 +312,9 @@ API local sockets. :param handle: UDP handle. Should have been initialized with - :c:func:`uv_udp_init`. + :c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have + been bound to an address explicitly with :c:func:`uv_udp_bind`, or + implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`. :param on: 1 for on, 0 for off. @@ -296,7 +325,9 @@ API Set the multicast ttl. :param handle: UDP handle. 
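
For the UDP counterpart documented in the udp.rst hunk above, a similar sketch with an arbitrary port: every thread or process that repeats this bind shares the port, and on supporting platforms incoming datagrams are distributed across the receivers, while others report ``UV_ENOTSUP``:

```c
#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  (void) handle;
  *buf = uv_buf_init(malloc(suggested), (unsigned int) suggested);
}

static void on_recv(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf,
                    const struct sockaddr* addr, unsigned flags) {
  (void) handle;
  (void) addr;
  (void) flags;
  if (nread > 0)
    printf("received %zd bytes\n", nread);
  free(buf->base);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  struct sockaddr_in addr;
  uv_udp_t handle;
  int rc;

  uv_udp_init(loop, &handle);
  uv_ip4_addr("0.0.0.0", 9124, &addr);

  /* Repeat this bind in each worker to share the datagram load. */
  rc = uv_udp_bind(&handle, (const struct sockaddr*) &addr, UV_UDP_REUSEPORT);
  if (rc != 0) {
    fprintf(stderr, "bind failed: %s\n", uv_strerror(rc)); /* e.g. UV_ENOTSUP */
    return 1;
  }

  uv_udp_recv_start(&handle, on_alloc, on_recv);
  return uv_run(loop, UV_RUN_DEFAULT);
}
```
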
Should have been initialized with - :c:func:`uv_udp_init`. + :c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have + been bound to an address explicitly with :c:func:`uv_udp_bind`, or + implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`. :param ttl: 1 through 255. @@ -307,7 +338,9 @@ API Set the multicast interface to send or receive data on. :param handle: UDP handle. Should have been initialized with - :c:func:`uv_udp_init`. + :c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have + been bound to an address explicitly with :c:func:`uv_udp_bind`, or + implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`. :param interface_addr: interface address. @@ -318,7 +351,9 @@ API Set broadcast on or off. :param handle: UDP handle. Should have been initialized with - :c:func:`uv_udp_init`. + :c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have + been bound to an address explicitly with :c:func:`uv_udp_bind`, or + implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`. :param on: 1 for on, 0 for off. @@ -329,7 +364,9 @@ API Set the time to live. :param handle: UDP handle. Should have been initialized with - :c:func:`uv_udp_init`. + :c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have + been bound to an address explicitly with :c:func:`uv_udp_bind`, or + implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`. :param ttl: 1 through 255. diff --git a/deps/uv/include/uv.h b/deps/uv/include/uv.h index a62b3fa69b1087..9e450c5110fe57 100644 --- a/deps/uv/include/uv.h +++ b/deps/uv/include/uv.h @@ -260,7 +260,9 @@ typedef struct uv_metrics_s uv_metrics_t; typedef enum { UV_LOOP_BLOCK_SIGNAL = 0, - UV_METRICS_IDLE_TIME + UV_METRICS_IDLE_TIME, + UV_LOOP_USE_IO_URING_SQPOLL +#define UV_LOOP_USE_IO_URING_SQPOLL UV_LOOP_USE_IO_URING_SQPOLL } uv_loop_option; typedef enum { @@ -604,7 +606,18 @@ UV_EXTERN int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable); enum uv_tcp_flags { /* Used with uv_tcp_bind, when an IPv6 address is used. */ - UV_TCP_IPV6ONLY = 1 + UV_TCP_IPV6ONLY = 1, + + /* Enable SO_REUSEPORT socket option when binding the handle. + * This allows completely duplicate bindings by multiple processes + * or threads if they all set SO_REUSEPORT before binding the port. + * Incoming connections are distributed across the participating + * listener sockets. + * + * This flag is available only on Linux 3.9+, DragonFlyBSD 3.6+, + * FreeBSD 12.0+, Solaris 11.4, and AIX 7.2.5+ for now. + */ + UV_TCP_REUSEPORT = 2, }; UV_EXTERN int uv_tcp_bind(uv_tcp_t* handle, @@ -645,10 +658,13 @@ enum uv_udp_flags { UV_UDP_PARTIAL = 2, /* * Indicates if SO_REUSEADDR will be set when binding the handle. - * This sets the SO_REUSEPORT socket flag on the BSDs and OS X. On other - * Unix platforms, it sets the SO_REUSEADDR flag. What that means is that - * multiple threads or processes can bind to the same address without error - * (provided they all set the flag) but only the last one to bind will receive + * This sets the SO_REUSEPORT socket flag on the BSDs (except for + * DragonFlyBSD), OS X, and other platforms where SO_REUSEPORTs don't + * have the capability of load balancing, as the opposite of what + * UV_UDP_REUSEPORT would do. On other Unix platforms, it sets the + * SO_REUSEADDR flag. 
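
The include/uv.h hunk above adds the ``UV_LOOP_USE_IO_URING_SQPOLL`` loop option (the docs prose elsewhere in this diff calls it ``UV_LOOP_ENABLE_IO_URING_SQPOLL``; the header spelling is used here). A hedged sketch of opting a loop back in to io_uring for file-system requests, assuming the option takes no extra ``uv_loop_configure()`` argument, in the same way ``UV_METRICS_IDLE_TIME`` does:

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t loop;

  uv_loop_init(&loop);

#ifdef UV_LOOP_USE_IO_URING_SQPOLL
  /* Opt back in to the io_uring (SQPOLL) path for uv_fs_* requests on this
   * loop; without this call they go through the threadpool as before. */
  if (uv_loop_configure(&loop, UV_LOOP_USE_IO_URING_SQPOLL) != 0)
    fprintf(stderr, "io_uring SQPOLL unavailable, staying on threadpool\n");
#endif

  /* ... queue uv_fs_* work against this loop as usual ... */

  uv_run(&loop, UV_RUN_DEFAULT);
  uv_loop_close(&loop);
  return 0;
}
```
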
What that means is that multiple threads or + * processes can bind to the same address without error (provided + * they all set the flag) but only the last one to bind will receive * any traffic, in effect "stealing" the port from the previous listener. */ UV_UDP_REUSEADDR = 4, @@ -671,6 +687,18 @@ enum uv_udp_flags { * This flag is no-op on platforms other than Linux. */ UV_UDP_LINUX_RECVERR = 32, + /* + * Indicates if SO_REUSEPORT will be set when binding the handle. + * This sets the SO_REUSEPORT socket option on supported platforms. + * Unlike UV_UDP_REUSEADDR, this flag will make multiple threads or + * processes that are binding to the same address and port "share" + * the port, which means incoming datagrams are distributed across + * the receiving sockets among threads or processes. + * + * This flag is available only on Linux 3.9+, DragonFlyBSD 3.6+, + * FreeBSD 12.0+, Solaris 11.4, and AIX 7.2.5+ for now. + */ + UV_UDP_REUSEPORT = 64, /* * Indicates that recvmmsg should be used, if available. */ @@ -1903,17 +1931,17 @@ struct uv_loop_s { UV_EXTERN void* uv_loop_get_data(const uv_loop_t*); UV_EXTERN void uv_loop_set_data(uv_loop_t*, void* data); -/* String utilities needed internally for dealing with Windows. */ -size_t uv_utf16_length_as_wtf8(const uint16_t* utf16, - ssize_t utf16_len); -int uv_utf16_to_wtf8(const uint16_t* utf16, - ssize_t utf16_len, - char** wtf8_ptr, - size_t* wtf8_len_ptr); -ssize_t uv_wtf8_length_as_utf16(const char* wtf8); -void uv_wtf8_to_utf16(const char* wtf8, - uint16_t* utf16, - size_t utf16_len); +/* Unicode utilities needed for dealing with Windows. */ +UV_EXTERN size_t uv_utf16_length_as_wtf8(const uint16_t* utf16, + ssize_t utf16_len); +UV_EXTERN int uv_utf16_to_wtf8(const uint16_t* utf16, + ssize_t utf16_len, + char** wtf8_ptr, + size_t* wtf8_len_ptr); +UV_EXTERN ssize_t uv_wtf8_length_as_utf16(const char* wtf8); +UV_EXTERN void uv_wtf8_to_utf16(const char* wtf8, + uint16_t* utf16, + size_t utf16_len); /* Don't export the private CPP symbols. */ #undef UV_HANDLE_TYPE_PRIVATE diff --git a/deps/uv/include/uv/tree.h b/deps/uv/include/uv/tree.h index 2b28835fdedef2..06bba084f38669 100644 --- a/deps/uv/include/uv/tree.h +++ b/deps/uv/include/uv/tree.h @@ -35,21 +35,7 @@ #endif /* - * This file defines data structures for different types of trees: - * splay trees and red-black trees. - * - * A splay tree is a self-organizing data structure. Every operation - * on the tree causes a splay to happen. The splay moves the requested - * node to the root of the tree and partly rebalances it. - * - * This has the benefit that request locality causes faster lookups as - * the requested nodes move to the top of the tree. On the other hand, - * every lookup causes memory writes. - * - * The Balance Theorem bounds the total access time for m operations - * and n inserts on an initially empty tree as O((m + n)lg n). The - * amortized cost for a sequence of m accesses to a splay tree is O(lg n); - * + * This file defines data structures for red-black trees. * A red-black tree is a binary search tree with the node color as an * extra attribute. It fulfills a set of conditions: * - every search path from the root to a leaf consists of the @@ -61,239 +47,6 @@ * The maximum height of a red-black tree is 2lg (n+1). 
*/ -#define SPLAY_HEAD(name, type) \ -struct name { \ - struct type *sph_root; /* root of the tree */ \ -} - -#define SPLAY_INITIALIZER(root) \ - { NULL } - -#define SPLAY_INIT(root) do { \ - (root)->sph_root = NULL; \ -} while (/*CONSTCOND*/ 0) - -#define SPLAY_ENTRY(type) \ -struct { \ - struct type *spe_left; /* left element */ \ - struct type *spe_right; /* right element */ \ -} - -#define SPLAY_LEFT(elm, field) (elm)->field.spe_left -#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right -#define SPLAY_ROOT(head) (head)->sph_root -#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) - -/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ -#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ - SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ - SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ - (head)->sph_root = tmp; \ -} while (/*CONSTCOND*/ 0) - -#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ - SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ - SPLAY_LEFT(tmp, field) = (head)->sph_root; \ - (head)->sph_root = tmp; \ -} while (/*CONSTCOND*/ 0) - -#define SPLAY_LINKLEFT(head, tmp, field) do { \ - SPLAY_LEFT(tmp, field) = (head)->sph_root; \ - tmp = (head)->sph_root; \ - (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ -} while (/*CONSTCOND*/ 0) - -#define SPLAY_LINKRIGHT(head, tmp, field) do { \ - SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ - tmp = (head)->sph_root; \ - (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ -} while (/*CONSTCOND*/ 0) - -#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ - SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ - SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \ - SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ - SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ -} while (/*CONSTCOND*/ 0) - -/* Generates prototypes and inline functions */ - -#define SPLAY_PROTOTYPE(name, type, field, cmp) \ -void name##_SPLAY(struct name *, struct type *); \ -void name##_SPLAY_MINMAX(struct name *, int); \ -struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ -struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ - \ -/* Finds the node with the same key as elm */ \ -static __inline struct type * \ -name##_SPLAY_FIND(struct name *head, struct type *elm) \ -{ \ - if (SPLAY_EMPTY(head)) \ - return(NULL); \ - name##_SPLAY(head, elm); \ - if ((cmp)(elm, (head)->sph_root) == 0) \ - return (head->sph_root); \ - return (NULL); \ -} \ - \ -static __inline struct type * \ -name##_SPLAY_NEXT(struct name *head, struct type *elm) \ -{ \ - name##_SPLAY(head, elm); \ - if (SPLAY_RIGHT(elm, field) != NULL) { \ - elm = SPLAY_RIGHT(elm, field); \ - while (SPLAY_LEFT(elm, field) != NULL) { \ - elm = SPLAY_LEFT(elm, field); \ - } \ - } else \ - elm = NULL; \ - return (elm); \ -} \ - \ -static __inline struct type * \ -name##_SPLAY_MIN_MAX(struct name *head, int val) \ -{ \ - name##_SPLAY_MINMAX(head, val); \ - return (SPLAY_ROOT(head)); \ -} - -/* Main splay operation. 
- * Moves node close to the key of elm to top - */ -#define SPLAY_GENERATE(name, type, field, cmp) \ -struct type * \ -name##_SPLAY_INSERT(struct name *head, struct type *elm) \ -{ \ - if (SPLAY_EMPTY(head)) { \ - SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ - } else { \ - int __comp; \ - name##_SPLAY(head, elm); \ - __comp = (cmp)(elm, (head)->sph_root); \ - if(__comp < 0) { \ - SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \ - SPLAY_RIGHT(elm, field) = (head)->sph_root; \ - SPLAY_LEFT((head)->sph_root, field) = NULL; \ - } else if (__comp > 0) { \ - SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \ - SPLAY_LEFT(elm, field) = (head)->sph_root; \ - SPLAY_RIGHT((head)->sph_root, field) = NULL; \ - } else \ - return ((head)->sph_root); \ - } \ - (head)->sph_root = (elm); \ - return (NULL); \ -} \ - \ -struct type * \ -name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ -{ \ - struct type *__tmp; \ - if (SPLAY_EMPTY(head)) \ - return (NULL); \ - name##_SPLAY(head, elm); \ - if ((cmp)(elm, (head)->sph_root) == 0) { \ - if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ - (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ - } else { \ - __tmp = SPLAY_RIGHT((head)->sph_root, field); \ - (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ - name##_SPLAY(head, elm); \ - SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ - } \ - return (elm); \ - } \ - return (NULL); \ -} \ - \ -void \ -name##_SPLAY(struct name *head, struct type *elm) \ -{ \ - struct type __node, *__left, *__right, *__tmp; \ - int __comp; \ - \ - SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \ - __left = __right = &__node; \ - \ - while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \ - if (__comp < 0) { \ - __tmp = SPLAY_LEFT((head)->sph_root, field); \ - if (__tmp == NULL) \ - break; \ - if ((cmp)(elm, __tmp) < 0){ \ - SPLAY_ROTATE_RIGHT(head, __tmp, field); \ - if (SPLAY_LEFT((head)->sph_root, field) == NULL) \ - break; \ - } \ - SPLAY_LINKLEFT(head, __right, field); \ - } else if (__comp > 0) { \ - __tmp = SPLAY_RIGHT((head)->sph_root, field); \ - if (__tmp == NULL) \ - break; \ - if ((cmp)(elm, __tmp) > 0){ \ - SPLAY_ROTATE_LEFT(head, __tmp, field); \ - if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \ - break; \ - } \ - SPLAY_LINKRIGHT(head, __left, field); \ - } \ - } \ - SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ -} \ - \ -/* Splay with either the minimum or the maximum element \ - * Used to find minimum or maximum element in tree. 
\ - */ \ -void name##_SPLAY_MINMAX(struct name *head, int __comp) \ -{ \ - struct type __node, *__left, *__right, *__tmp; \ - \ - SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \ - __left = __right = &__node; \ - \ - for (;;) { \ - if (__comp < 0) { \ - __tmp = SPLAY_LEFT((head)->sph_root, field); \ - if (__tmp == NULL) \ - break; \ - if (__comp < 0){ \ - SPLAY_ROTATE_RIGHT(head, __tmp, field); \ - if (SPLAY_LEFT((head)->sph_root, field) == NULL) \ - break; \ - } \ - SPLAY_LINKLEFT(head, __right, field); \ - } else if (__comp > 0) { \ - __tmp = SPLAY_RIGHT((head)->sph_root, field); \ - if (__tmp == NULL) \ - break; \ - if (__comp > 0) { \ - SPLAY_ROTATE_LEFT(head, __tmp, field); \ - if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \ - break; \ - } \ - SPLAY_LINKRIGHT(head, __left, field); \ - } \ - } \ - SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ -} - -#define SPLAY_NEGINF -1 -#define SPLAY_INF 1 - -#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) -#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) -#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) -#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) -#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ - : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) -#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \ - : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) - -#define SPLAY_FOREACH(x, name, head) \ - for ((x) = SPLAY_MIN(name, head); \ - (x) != NULL; \ - (x) = SPLAY_NEXT(name, head, x)) - /* Macros that define a red-black tree */ #define RB_HEAD(name, type) \ struct name { \ @@ -730,8 +483,8 @@ name##_RB_MINMAX(struct name *head, int val) \ #define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) #define RB_FIND(name, x, y) name##_RB_FIND(x, y) #define RB_NFIND(name, x, y) name##_RB_NFIND(x, y) -#define RB_NEXT(name, x, y) name##_RB_NEXT(y) -#define RB_PREV(name, x, y) name##_RB_PREV(y) +#define RB_NEXT(name, x) name##_RB_NEXT(x) +#define RB_PREV(name, x) name##_RB_PREV(x) #define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) #define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) diff --git a/deps/uv/include/uv/version.h b/deps/uv/include/uv/version.h index d6a61a10f7c4b5..77a8b2541749f9 100644 --- a/deps/uv/include/uv/version.h +++ b/deps/uv/include/uv/version.h @@ -31,8 +31,8 @@ */ #define UV_VERSION_MAJOR 1 -#define UV_VERSION_MINOR 48 -#define UV_VERSION_PATCH 0 +#define UV_VERSION_MINOR 49 +#define UV_VERSION_PATCH 1 #define UV_VERSION_IS_RELEASE 1 #define UV_VERSION_SUFFIX "" diff --git a/deps/uv/include/uv/win.h b/deps/uv/include/uv/win.h index f4adaa216c6f0c..12ac53b4f217d2 100644 --- a/deps/uv/include/uv/win.h +++ b/deps/uv/include/uv/win.h @@ -290,8 +290,8 @@ typedef struct { #define UV_ONCE_INIT { 0, NULL } typedef struct uv_once_s { - unsigned char ran; - HANDLE event; + unsigned char unused; + INIT_ONCE init_once; } uv_once_t; /* Platform-specific definitions for uv_spawn support. 
*/ diff --git a/deps/uv/src/random.c b/deps/uv/src/random.c index e75f77deb2bdaf..57fc0d911da316 100644 --- a/deps/uv/src/random.c +++ b/deps/uv/src/random.c @@ -82,7 +82,7 @@ static void uv__random_done(struct uv__work* w, int status) { uv_random_t* req; req = container_of(w, uv_random_t, work_req); - uv__req_unregister(req->loop, req); + uv__req_unregister(req->loop); if (status == 0) status = req->status; diff --git a/deps/uv/src/threadpool.c b/deps/uv/src/threadpool.c index dbef67f2f10f1d..45af50dcd04ea6 100644 --- a/deps/uv/src/threadpool.c +++ b/deps/uv/src/threadpool.c @@ -356,7 +356,7 @@ static void uv__queue_done(struct uv__work* w, int err) { uv_work_t* req; req = container_of(w, uv_work_t, work_req); - uv__req_unregister(req->loop, req); + uv__req_unregister(req->loop); if (req->after_work_cb == NULL) return; diff --git a/deps/uv/src/unix/async.c b/deps/uv/src/unix/async.c index 0ff2669e30a628..bc97ec54c4fcc6 100644 --- a/deps/uv/src/unix/async.c +++ b/deps/uv/src/unix/async.c @@ -38,6 +38,34 @@ #include #endif +#if UV__KQUEUE_EVFILT_USER +static uv_once_t kqueue_runtime_detection_guard = UV_ONCE_INIT; +static int kqueue_evfilt_user_support = 1; + + +static void uv__kqueue_runtime_detection(void) { + int kq; + struct kevent ev[2]; + struct timespec timeout = {0, 0}; + + /* Perform the runtime detection to ensure that kqueue with + * EVFILT_USER actually works. */ + kq = kqueue(); + EV_SET(ev, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER, + EV_ADD | EV_CLEAR, 0, 0, 0); + EV_SET(ev + 1, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER, + 0, NOTE_TRIGGER, 0, 0); + if (kevent(kq, ev, 2, ev, 1, &timeout) < 1 || + ev[0].filter != EVFILT_USER || + ev[0].ident != UV__KQUEUE_EVFILT_USER_IDENT || + ev[0].flags & EV_ERROR) + /* If we wind up here, we can assume that EVFILT_USER is defined but + * broken on the current system. */ + kqueue_evfilt_user_support = 0; + uv__close(kq); +} +#endif + static void uv__async_send(uv_loop_t* loop); static int uv__async_start(uv_loop_t* loop); static void uv__cpu_relax(void); @@ -130,8 +158,10 @@ void uv__async_close(uv_async_t* handle) { static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { +#ifndef __linux__ char buf[1024]; ssize_t r; +#endif struct uv__queue queue; struct uv__queue* q; uv_async_t* h; @@ -139,7 +169,12 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { assert(w == &loop->async_io_watcher); +#ifndef __linux__ +#if UV__KQUEUE_EVFILT_USER + for (;!kqueue_evfilt_user_support;) { +#else for (;;) { +#endif r = read(w->fd, buf, sizeof(buf)); if (r == sizeof(buf)) @@ -156,6 +191,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { abort(); } +#endif /* !__linux__ */ uv__queue_move(&loop->async_handles, &queue); while (!uv__queue_empty(&queue)) { @@ -179,34 +215,58 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { static void uv__async_send(uv_loop_t* loop) { - const void* buf; - ssize_t len; int fd; - int r; - - buf = ""; - len = 1; - fd = loop->async_wfd; - -#if defined(__linux__) - if (fd == -1) { - static const uint64_t val = 1; - buf = &val; - len = sizeof(val); - fd = loop->async_io_watcher.fd; /* eventfd */ + ssize_t r; +#ifdef __linux__ + uint64_t val; + + fd = loop->async_io_watcher.fd; /* eventfd */ + for (val = 1; /* empty */; val = 1) { + r = write(fd, &val, sizeof(uint64_t)); + if (r < 0) { + /* When EAGAIN occurs, the eventfd counter hits the maximum value of the unsigned 64-bit. 
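
The comment being added in the async.c hunk below describes the eventfd counter that the Linux wakeup path writes to. A standalone Linux-only sketch of those semantics, showing how several writes collapse into a single readable event:

```c
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void) {
  uint64_t val;
  int fd;
  int i;

  fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (fd == -1)
    return 1;

  /* Three "wakeups": each write adds to the 64-bit counter. */
  for (i = 0; i < 3; i++) {
    val = 1;
    if (write(fd, &val, sizeof(val)) != sizeof(val))
      return 1;
  }

  /* One read drains the whole counter and resets it to zero. */
  if (read(fd, &val, sizeof(val)) == sizeof(val))
    printf("counter drained: %llu\n", (unsigned long long) val);  /* 3 */

  /* With the counter back at zero, a non-blocking read fails with EAGAIN,
   * so the poller sees at most one readable event per batch of sends. */
  if (read(fd, &val, sizeof(val)) == -1)
    perror("second read");

  close(fd);
  return 0;
}
```
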
+ * We need to first drain the eventfd and then write again. + * + * Check out https://man7.org/linux/man-pages/man2/eventfd.2.html for details. + */ + if (errno == EAGAIN) { + /* It's ready to retry. */ + if (read(fd, &val, sizeof(uint64_t)) > 0 || errno == EAGAIN) { + continue; + } + } + /* Unknown error occurs. */ + break; + } + return; + } +#else +#if UV__KQUEUE_EVFILT_USER + struct kevent ev; + + if (kqueue_evfilt_user_support) { + fd = loop->async_io_watcher.fd; /* magic number for EVFILT_USER */ + EV_SET(&ev, fd, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0); + r = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL); + if (r == 0) + return; + else + abort(); } #endif + fd = loop->async_wfd; /* write end of the pipe */ do - r = write(fd, buf, len); + r = write(fd, "x", 1); while (r == -1 && errno == EINTR); - if (r == len) + if (r == 1) return; if (r == -1) if (errno == EAGAIN || errno == EWOULDBLOCK) return; +#endif abort(); } @@ -215,6 +275,9 @@ static void uv__async_send(uv_loop_t* loop) { static int uv__async_start(uv_loop_t* loop) { int pipefd[2]; int err; +#if UV__KQUEUE_EVFILT_USER + struct kevent ev; +#endif if (loop->async_io_watcher.fd != -1) return 0; @@ -226,6 +289,36 @@ static int uv__async_start(uv_loop_t* loop) { pipefd[0] = err; pipefd[1] = -1; +#elif UV__KQUEUE_EVFILT_USER + uv_once(&kqueue_runtime_detection_guard, uv__kqueue_runtime_detection); + if (kqueue_evfilt_user_support) { + /* In order not to break the generic pattern of I/O polling, a valid + * file descriptor is required to take up a room in loop->watchers, + * thus we create one for that, but this fd will not be actually used, + * it's just a placeholder and magic number which is going to be closed + * during the cleanup, as other FDs. */ + err = uv__open_cloexec("/dev/null", O_RDONLY); + if (err < 0) + return err; + + pipefd[0] = err; + pipefd[1] = -1; + + /* When using EVFILT_USER event to wake up the kqueue, this event must be + * registered beforehand. Otherwise, calling kevent() to issue an + * unregistered EVFILT_USER event will get an ENOENT. + * Since uv__async_send() may happen before uv__io_poll() with multi-threads, + * we can't defer this registration of EVFILT_USER event as we did for other + * events, but must perform it right away. */ + EV_SET(&ev, err, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0); + err = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL); + if (err < 0) + return UV__ERR(errno); + } else { + err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE); + if (err < 0) + return err; + } #else err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE); if (err < 0) @@ -236,6 +329,13 @@ static int uv__async_start(uv_loop_t* loop) { uv__io_start(loop, &loop->async_io_watcher, POLLIN); loop->async_wfd = pipefd[1]; +#if UV__KQUEUE_EVFILT_USER + /* Prevent the EVFILT_USER event from being added to kqueue redundantly + * and mistakenly later in uv__io_poll(). 
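
The surrounding async.c changes register an ``EVFILT_USER`` event up front and trigger it with ``NOTE_TRIGGER`` to wake the kqueue without a pipe. A standalone BSD/macOS sketch of that pattern, using an arbitrary ident in place of libuv's placeholder fd:

```c
#include <stdio.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

int main(void) {
  struct timespec timeout = {0, 0};
  struct kevent ev;
  int kq;
  int n;

  kq = kqueue();
  if (kq == -1)
    return 1;

  /* Register ident 42 as a user event (analogous to uv__async_start). */
  EV_SET(&ev, 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
  if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
    return 1;

  /* Trigger it, typically from another thread (like uv__async_send). */
  EV_SET(&ev, 42, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
  if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
    return 1;

  /* The poller wakes up with the user event pending; no fd read needed. */
  n = kevent(kq, NULL, 0, &ev, 1, &timeout);
  if (n == 1 && ev.filter == EVFILT_USER)
    printf("woke up via EVFILT_USER, ident %lu\n", (unsigned long) ev.ident);

  return 0;
}
```
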
*/ + if (kqueue_evfilt_user_support) + loop->async_io_watcher.events = loop->async_io_watcher.pevents; +#endif + return 0; } diff --git a/deps/uv/src/unix/core.c b/deps/uv/src/unix/core.c index 965e7f775250cf..0c52ccf2ad7b2d 100644 --- a/deps/uv/src/unix/core.c +++ b/deps/uv/src/unix/core.c @@ -53,7 +53,8 @@ #if defined(__APPLE__) # include -# endif /* defined(__APPLE__) */ +# include +#endif /* defined(__APPLE__) */ #if defined(__APPLE__) && !TARGET_OS_IPHONE @@ -94,6 +95,15 @@ extern char** environ; # define uv__accept4 accept4 #endif +#if defined(__FreeBSD__) +# include +# include +#endif + +#if defined(__NetBSD__) +# include +#endif + #if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__) # include #endif @@ -156,7 +166,7 @@ void uv_close(uv_handle_t* handle, uv_close_cb close_cb) { break; case UV_TTY: - uv__stream_close((uv_stream_t*)handle); + uv__tty_close((uv_tty_t*)handle); break; case UV_TCP: @@ -1024,7 +1034,7 @@ int uv_getrusage(uv_rusage_t* rusage) { #if defined(__APPLE__) rusage->ru_maxrss /= 1024; /* macOS and iOS report bytes. */ #elif defined(__sun) - rusage->ru_maxrss /= getpagesize() / 1024; /* Solaris reports pages. */ + rusage->ru_maxrss *= getpagesize() / 1024; /* Solaris reports pages. */ #endif return 0; @@ -1616,6 +1626,7 @@ static int set_nice_for_calling_thread(int priority) { * If the function fails, the return value is non-zero. */ int uv_thread_setpriority(uv_thread_t tid, int priority) { +#if !defined(__GNU__) int r; int min; int max; @@ -1677,10 +1688,14 @@ int uv_thread_setpriority(uv_thread_t tid, int priority) { param.sched_priority = prio; r = pthread_setschedparam(tid, policy, ¶m); if (r != 0) - return UV__ERR(errno); + return UV__ERR(errno); } return 0; +#else /* !defined(__GNU__) */ + /* Simulate success on systems where thread priority is not implemented. */ + return 0; +#endif /* !defined(__GNU__) */ } int uv_os_uname(uv_utsname_t* buffer) { @@ -1864,11 +1879,31 @@ int uv__search_path(const char* prog, char* buf, size_t* buflen) { return UV_EINVAL; } +#if defined(__linux__) || defined (__FreeBSD__) +# define uv__cpu_count(cpuset) CPU_COUNT(cpuset) +#elif defined(__NetBSD__) +static int uv__cpu_count(cpuset_t* set) { + int rc; + cpuid_t i; + + rc = 0; + for (i = 0;; i++) { + int r = cpuset_isset(i, set); + if (r < 0) + break; + if (r) + rc++; + } + + return rc; +} +#endif /* __NetBSD__ */ unsigned int uv_available_parallelism(void) { + long rc = -1; + #ifdef __linux__ cpu_set_t set; - long rc; memset(&set, 0, sizeof(set)); @@ -1877,29 +1912,127 @@ unsigned int uv_available_parallelism(void) { * before falling back to sysconf(_SC_NPROCESSORS_ONLN). 
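
With the core.c changes around here, ``uv_available_parallelism()`` derives its result from the process affinity mask on more platforms and, on Linux, from cgroup CPU limits before falling back to ``sysconf()``. A short usage sketch; sizing a worker pool this way is one option, exporting ``UV_THREADPOOL_SIZE`` before the first loop starts is another:

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  unsigned int n;

  n = uv_available_parallelism();  /* clamped to >= 1 */
  printf("usable CPUs: %u\n", n);

  /* e.g. spawn n uv_thread_t workers, or set UV_THREADPOOL_SIZE to n in the
   * environment if the libuv threadpool should match this number. */
  return 0;
}
```
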
*/ if (0 == sched_getaffinity(0, sizeof(set), &set)) - rc = CPU_COUNT(&set); - else - rc = sysconf(_SC_NPROCESSORS_ONLN); - - if (rc < 1) - rc = 1; - - return (unsigned) rc; + rc = uv__cpu_count(&set); #elif defined(__MVS__) - int rc; - rc = __get_num_online_cpus(); if (rc < 1) rc = 1; return (unsigned) rc; -#else /* __linux__ */ - long rc; +#elif defined(__FreeBSD__) + cpuset_t set; + + memset(&set, 0, sizeof(set)); + + if (0 == cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(set), &set)) + rc = uv__cpu_count(&set); +#elif defined(__NetBSD__) + cpuset_t* set = cpuset_create(); + if (set != NULL) { + if (0 == sched_getaffinity_np(getpid(), sizeof(set), &set)) + rc = uv__cpu_count(&set); + cpuset_destroy(set); + } +#elif defined(__APPLE__) + int nprocs; + size_t i; + size_t len = sizeof(nprocs); + static const char *mib[] = { + "hw.activecpu", + "hw.logicalcpu", + "hw.ncpu" + }; + + for (i = 0; i < ARRAY_SIZE(mib); i++) { + if (0 == sysctlbyname(mib[i], &nprocs, &len, NULL, 0) && + len == sizeof(nprocs) && + nprocs > 0) { + rc = nprocs; + break; + } + } +#elif defined(__OpenBSD__) + int nprocs; + size_t i; + size_t len = sizeof(nprocs); + static int mib[][2] = { +# ifdef HW_NCPUONLINE + { CTL_HW, HW_NCPUONLINE }, +# endif + { CTL_HW, HW_NCPU } + }; + + for (i = 0; i < ARRAY_SIZE(mib); i++) { + if (0 == sysctl(mib[i], ARRAY_SIZE(mib[i]), &nprocs, &len, NULL, 0) && + len == sizeof(nprocs) && + nprocs > 0) { + rc = nprocs; + break; + } + } +#endif /* __linux__ */ + + if (rc < 0) + rc = sysconf(_SC_NPROCESSORS_ONLN); + +#ifdef __linux__ + { + double rc_with_cgroup; + uv__cpu_constraint c = {0, 0, 0.0}; + + if (uv__get_constrained_cpu(&c) == 0 && c.period_length > 0) { + rc_with_cgroup = (double)c.quota_per_period / c.period_length * c.proportions; + if (rc_with_cgroup < rc) + rc = (long)rc_with_cgroup; /* Casting is safe since rc_with_cgroup < rc < LONG_MAX */ + } + } +#endif /* __linux__ */ - rc = sysconf(_SC_NPROCESSORS_ONLN); if (rc < 1) rc = 1; return (unsigned) rc; -#endif /* __linux__ */ +} + +int uv__sock_reuseport(int fd) { + int on = 1; +#if defined(__FreeBSD__) && __FreeBSD__ >= 12 && defined(SO_REUSEPORT_LB) + /* FreeBSD 12 introduced a new socket option named SO_REUSEPORT_LB + * with the capability of load balancing, it's the substitution of + * the SO_REUSEPORTs on Linux and DragonFlyBSD. */ + if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT_LB, &on, sizeof(on))) + return UV__ERR(errno); +#elif (defined(__linux__) || \ + defined(_AIX73) || \ + (defined(__DragonFly__) && __DragonFly_version >= 300600) || \ + (defined(UV__SOLARIS_11_4) && UV__SOLARIS_11_4)) && \ + defined(SO_REUSEPORT) + /* On Linux 3.9+, the SO_REUSEPORT implementation distributes connections + * evenly across all of the threads (or processes) that are blocked in + * accept() on the same port. As with TCP, SO_REUSEPORT distributes datagrams + * evenly across all of the receiving threads (or process). + * + * DragonFlyBSD 3.6.0 extended SO_REUSEPORT to distribute workload to + * available sockets, which made it the equivalent of Linux's SO_REUSEPORT. + * + * AIX 7.2.5 added the feature that would add the capability to distribute + * incoming connections or datagrams across all listening ports for SO_REUSEPORT. + * + * Solaris 11 supported SO_REUSEPORT, but it's implemented only for + * binding to the same address and port, without load balancing. + * Solaris 11.4 extended SO_REUSEPORT with the capability of load balancing. 
+ */ + if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on))) + return UV__ERR(errno); +#else + (void) (fd); + (void) (on); + /* SO_REUSEPORTs do not have the capability of load balancing on platforms + * other than those mentioned above. The semantics are completely different, + * therefore we shouldn't enable it, but fail this operation to indicate that + * UV_[TCP/UDP]_REUSEPORT is not supported on these platforms. */ + return UV_ENOTSUP; +#endif + + return 0; } diff --git a/deps/uv/src/unix/cygwin.c b/deps/uv/src/unix/cygwin.c index 4e5413963d6acf..4913108223f4da 100644 --- a/deps/uv/src/unix/cygwin.c +++ b/deps/uv/src/unix/cygwin.c @@ -36,9 +36,45 @@ int uv_uptime(double* uptime) { } int uv_resident_set_memory(size_t* rss) { - /* FIXME: read /proc/meminfo? */ - *rss = 0; + char buf[1024]; + const char* s; + long val; + int rc; + int i; + struct sysinfo si; + + /* rss: 24th element */ + rc = uv__slurp("/proc/self/stat", buf, sizeof(buf)); + if (rc < 0) + return rc; + + /* find the last ')' */ + s = strrchr(buf, ')'); + if (s == NULL) + goto err; + + for (i = 1; i <= 22; i++) { + s = strchr(s + 1, ' '); + if (s == NULL) + goto err; + } + + errno = 0; + val = strtol(s, NULL, 10); + if (val < 0 || errno != 0) + goto err; + + do + rc = sysinfo(&si); + while (rc == -1 && errno == EINTR); + if (rc == -1) + return UV__ERR(errno); + + *rss = val * si.mem_unit; return 0; + +err: + return UV_EINVAL; } int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { diff --git a/deps/uv/src/unix/darwin-syscalls.h b/deps/uv/src/unix/darwin-syscalls.h new file mode 100644 index 00000000000000..dc2d1bd234b1f1 --- /dev/null +++ b/deps/uv/src/unix/darwin-syscalls.h @@ -0,0 +1,17 @@ +#ifndef UV_DARWIN_SYSCALLS_H_ +#define UV_DARWIN_SYSCALLS_H_ + +#include +#include + +/* https://github.com/apple/darwin-xnu/blob/master/bsd/sys/socket.h */ + +struct mmsghdr { + struct msghdr msg_hdr; + size_t msg_len; +}; + +ssize_t recvmsg_x(int s, const struct mmsghdr* msgp, u_int cnt, int flags); +ssize_t sendmsg_x(int s, const struct mmsghdr* msgp, u_int cnt, int flags); + +#endif /* UV_DARWIN_SYSCALLS_H_ */ diff --git a/deps/uv/src/unix/darwin.c b/deps/uv/src/unix/darwin.c index 5e764a65ee4c71..009efbefaa70ee 100644 --- a/deps/uv/src/unix/darwin.c +++ b/deps/uv/src/unix/darwin.c @@ -25,7 +25,6 @@ #include #include -#include #include #include #include /* _NSGetExecutablePath */ @@ -34,7 +33,6 @@ #include /* sysconf */ static uv_once_t once = UV_ONCE_INIT; -static uint64_t (*time_func)(void); static mach_timebase_info_data_t timebase; @@ -56,16 +54,12 @@ void uv__platform_loop_delete(uv_loop_t* loop) { static void uv__hrtime_init_once(void) { if (KERN_SUCCESS != mach_timebase_info(&timebase)) abort(); - - time_func = (uint64_t (*)(void)) dlsym(RTLD_DEFAULT, "mach_continuous_time"); - if (time_func == NULL) - time_func = mach_absolute_time; } uint64_t uv__hrtime(uv_clocktype_t type) { uv_once(&once, uv__hrtime_init_once); - return time_func() * timebase.numer / timebase.denom; + return mach_continuous_time() * timebase.numer / timebase.denom; } diff --git a/deps/uv/src/unix/freebsd.c b/deps/uv/src/unix/freebsd.c index 191bc8bc213ffd..a6de29c558cde4 100644 --- a/deps/uv/src/unix/freebsd.c +++ b/deps/uv/src/unix/freebsd.c @@ -26,7 +26,12 @@ #include #include -#include +#if defined(__DragonFly__) +# include +# include +#else +# include +#endif #include #include #include diff --git a/deps/uv/src/unix/fs.c b/deps/uv/src/unix/fs.c index 3a74350f0e5ab6..239ecda16a7eb9 100644 --- a/deps/uv/src/unix/fs.c +++ 
b/deps/uv/src/unix/fs.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -82,17 +83,6 @@ # include #endif -#if defined(__CYGWIN__) || \ - (defined(__HAIKU__) && B_HAIKU_VERSION < B_HAIKU_VERSION_1_PRE_BETA_5) || \ - (defined(__sun) && !defined(__illumos__)) || \ - (defined(__APPLE__) && !TARGET_OS_IPHONE && \ - MAC_OS_X_VERSION_MIN_REQUIRED < 110000) -#define preadv(fd, bufs, nbufs, off) \ - pread(fd, (bufs)->iov_base, (bufs)->iov_len, off) -#define pwritev(fd, bufs, nbufs, off) \ - pwrite(fd, (bufs)->iov_base, (bufs)->iov_len, off) -#endif - #if defined(_AIX) && _XOPEN_SOURCE <= 600 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */ #endif @@ -149,7 +139,7 @@ extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */ #define POST \ do { \ if (cb != NULL) { \ - uv__req_register(loop, req); \ + uv__req_register(loop); \ uv__work_submit(loop, \ &req->work_req, \ UV__WORK_FAST_IO, \ @@ -406,6 +396,123 @@ static ssize_t uv__fs_open(uv_fs_t* req) { } +static ssize_t uv__preadv_or_pwritev_emul(int fd, + const struct iovec* bufs, + size_t nbufs, + off_t off, + int is_pread) { + ssize_t total; + ssize_t r; + size_t i; + size_t n; + void* p; + + total = 0; + for (i = 0; i < (size_t) nbufs; i++) { + p = bufs[i].iov_base; + n = bufs[i].iov_len; + + do + if (is_pread) + r = pread(fd, p, n, off); + else + r = pwrite(fd, p, n, off); + while (r == -1 && errno == EINTR); + + if (r == -1) { + if (total > 0) + return total; + return -1; + } + + off += r; + total += r; + + if ((size_t) r < n) + return total; + } + + return total; +} + + +#ifdef __linux__ +typedef int uv__iovcnt; +#else +typedef size_t uv__iovcnt; +#endif + + +static ssize_t uv__preadv_emul(int fd, + const struct iovec* bufs, + uv__iovcnt nbufs, + off_t off) { + return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1); +} + + +static ssize_t uv__pwritev_emul(int fd, + const struct iovec* bufs, + uv__iovcnt nbufs, + off_t off) { + return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0); +} + + +/* The function pointer cache is an uintptr_t because _Atomic void* + * doesn't work on macos/ios/etc... + * Disable optimization on armv7 to work around the bug described in + * https://github.com/libuv/libuv/issues/4532 + */ +#if defined(__arm__) && (__ARM_ARCH == 7) +__attribute__((optimize("O0"))) +#endif +static ssize_t uv__preadv_or_pwritev(int fd, + const struct iovec* bufs, + size_t nbufs, + off_t off, + _Atomic uintptr_t* cache, + int is_pread) { + ssize_t (*f)(int, const struct iovec*, uv__iovcnt, off_t); + void* p; + + p = (void*) atomic_load_explicit(cache, memory_order_relaxed); + if (p == NULL) { +#ifdef RTLD_DEFAULT + p = dlsym(RTLD_DEFAULT, is_pread ? "preadv" : "pwritev"); + dlerror(); /* Clear errors. */ +#endif /* RTLD_DEFAULT */ + if (p == NULL) + p = is_pread ? 
uv__preadv_emul : uv__pwritev_emul; + atomic_store_explicit(cache, (uintptr_t) p, memory_order_relaxed); + } + + /* Use memcpy instead of `f = p` to work around a compiler bug, + * see https://github.com/libuv/libuv/issues/4532 + */ + memcpy(&f, &p, sizeof(p)); + return f(fd, bufs, nbufs, off); +} + + +static ssize_t uv__preadv(int fd, + const struct iovec* bufs, + size_t nbufs, + off_t off) { + static _Atomic uintptr_t cache; + return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/1); +} + + +static ssize_t uv__pwritev(int fd, + const struct iovec* bufs, + size_t nbufs, + off_t off) { + static _Atomic uintptr_t cache; + return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/0); +} + + static ssize_t uv__fs_read(uv_fs_t* req) { const struct iovec* bufs; unsigned int iovmax; @@ -433,7 +540,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) { if (nbufs == 1) r = pread(fd, bufs->iov_base, bufs->iov_len, off); else if (nbufs > 1) - r = preadv(fd, bufs, nbufs, off); + r = uv__preadv(fd, bufs, nbufs, off); } #ifdef __PASE__ @@ -691,14 +798,23 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) { static ssize_t uv__fs_realpath(uv_fs_t* req) { char* buf; + char* tmp; #if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L - buf = realpath(req->path, NULL); - if (buf == NULL) + tmp = realpath(req->path, NULL); + if (tmp == NULL) + return -1; + buf = uv__strdup(tmp); + free(tmp); /* _Not_ uv__free. */ + if (buf == NULL) { + errno = ENOMEM; return -1; + } #else ssize_t len; + (void)tmp; + len = uv__fs_pathmax_size(req->path); buf = uv__malloc(len + 1); @@ -962,7 +1078,10 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) { return -1; } -#elif defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) +/* sendfile() on iOS(arm64) will throw SIGSYS signal cause crash. */ +#elif (defined(__APPLE__) && !TARGET_OS_IPHONE) \ + || defined(__DragonFly__) \ + || defined(__FreeBSD__) { off_t len; ssize_t r; @@ -1112,7 +1231,7 @@ static ssize_t uv__fs_write(uv_fs_t* req) { if (nbufs == 1) r = pwrite(fd, bufs->iov_base, bufs->iov_len, off); else if (nbufs > 1) - r = pwritev(fd, bufs, nbufs, off); + r = uv__pwritev(fd, bufs, nbufs, off); } return r; @@ -1125,6 +1244,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) { uv_file dstfd; struct stat src_statsbuf; struct stat dst_statsbuf; + struct timespec times[2]; int dst_flags; int result; int err; @@ -1202,6 +1322,35 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) { } } + /** + * Change the timestamps of the destination file to match the source file. + */ +#if defined(__APPLE__) + times[0] = src_statsbuf.st_atimespec; + times[1] = src_statsbuf.st_mtimespec; +#elif defined(_AIX) + times[0].tv_sec = src_statsbuf.st_atime; + times[0].tv_nsec = src_statsbuf.st_atime_n; + times[1].tv_sec = src_statsbuf.st_mtime; + times[1].tv_nsec = src_statsbuf.st_mtime_n; +#else + times[0] = src_statsbuf.st_atim; + times[1] = src_statsbuf.st_mtim; +#endif + + if (futimens(dstfd, times) == -1) { + err = UV__ERR(errno); + goto out; + } + + /* + * Change the ownership and permissions of the destination file to match the + * source file. + * `cp -p` does not care about errors here, so we don't either. Reuse the + * `result` variable to silence a -Wunused-result warning. 
+ */ + result = fchown(dstfd, src_statsbuf.st_uid, src_statsbuf.st_gid); + if (fchmod(dstfd, src_statsbuf.st_mode) == -1) { err = UV__ERR(errno); #ifdef __linux__ @@ -1619,7 +1768,7 @@ static void uv__fs_done(struct uv__work* w, int status) { uv_fs_t* req; req = container_of(w, uv_fs_t, work_req); - uv__req_unregister(req->loop, req); + uv__req_unregister(req->loop); if (status == UV_ECANCELED) { assert(req->result == 0); @@ -1631,7 +1780,7 @@ static void uv__fs_done(struct uv__work* w, int status) { void uv__fs_post(uv_loop_t* loop, uv_fs_t* req) { - uv__req_register(loop, req); + uv__req_register(loop); uv__work_submit(loop, &req->work_req, UV__WORK_FAST_IO, @@ -1766,6 +1915,9 @@ int uv_fs_ftruncate(uv_loop_t* loop, INIT(FTRUNCATE); req->file = file; req->off = off; + if (cb != NULL) + if (uv__iou_fs_ftruncate(loop, req)) + return 0; POST; } diff --git a/deps/uv/src/unix/fsevents.c b/deps/uv/src/unix/fsevents.c index df703f3635fc95..7fb6bb2ec36ae0 100644 --- a/deps/uv/src/unix/fsevents.c +++ b/deps/uv/src/unix/fsevents.c @@ -276,10 +276,6 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef, path += handle->realpath_len; len -= handle->realpath_len; - /* Ignore events with path equal to directory itself */ - if (len <= 1 && (flags & kFSEventStreamEventFlagItemIsDir)) - continue; - if (len == 0) { /* Since we're using fsevents to watch the file itself, * realpath == path, and we now need to get the basename of the file back @@ -793,6 +789,7 @@ int uv__cf_loop_signal(uv_loop_t* loop, /* Runs in UV loop to initialize handle */ int uv__fsevents_init(uv_fs_event_t* handle) { + char* buf; int err; uv__cf_loop_state_t* state; @@ -801,9 +798,13 @@ int uv__fsevents_init(uv_fs_event_t* handle) { return err; /* Get absolute path to file */ - handle->realpath = realpath(handle->path, NULL); - if (handle->realpath == NULL) + buf = realpath(handle->path, NULL); + if (buf == NULL) return UV__ERR(errno); + handle->realpath = uv__strdup(buf); + free(buf); /* _Not_ uv__free. */ + if (handle->realpath == NULL) + return UV_ENOMEM; handle->realpath_len = strlen(handle->realpath); /* Initialize event queue */ diff --git a/deps/uv/src/unix/getaddrinfo.c b/deps/uv/src/unix/getaddrinfo.c index 77337ace9454e0..b7075343666590 100644 --- a/deps/uv/src/unix/getaddrinfo.c +++ b/deps/uv/src/unix/getaddrinfo.c @@ -109,7 +109,7 @@ static void uv__getaddrinfo_done(struct uv__work* w, int status) { uv_getaddrinfo_t* req; req = container_of(w, uv_getaddrinfo_t, work_req); - uv__req_unregister(req->loop, req); + uv__req_unregister(req->loop); /* See initialization in uv_getaddrinfo(). 
*/ if (req->hints) diff --git a/deps/uv/src/unix/getnameinfo.c b/deps/uv/src/unix/getnameinfo.c index 991002a67d7072..959b4c6a821980 100644 --- a/deps/uv/src/unix/getnameinfo.c +++ b/deps/uv/src/unix/getnameinfo.c @@ -58,7 +58,7 @@ static void uv__getnameinfo_done(struct uv__work* w, int status) { char* service; req = container_of(w, uv_getnameinfo_t, work_req); - uv__req_unregister(req->loop, req); + uv__req_unregister(req->loop); host = service = NULL; if (status == UV_ECANCELED) { diff --git a/deps/uv/src/unix/internal.h b/deps/uv/src/unix/internal.h index bcb3be577e5849..568a55b55acb35 100644 --- a/deps/uv/src/unix/internal.h +++ b/deps/uv/src/unix/internal.h @@ -35,6 +35,10 @@ #include #include #include +#if defined(__APPLE__) || defined(__DragonFly__) || \ + defined(__FreeBSD__) || defined(__NetBSD__) +#include +#endif #define uv__msan_unpoison(p, n) \ do { \ @@ -71,8 +75,11 @@ # include #endif /* _AIX */ -#if defined(__APPLE__) && !TARGET_OS_IPHONE -# include +#if defined(__APPLE__) +# include "darwin-syscalls.h" +# if !TARGET_OS_IPHONE +# include +# endif #endif /* @@ -157,7 +164,8 @@ typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t; /* loop flags */ enum { UV_LOOP_BLOCK_SIGPROF = 0x1, - UV_LOOP_REAP_CHILDREN = 0x2 + UV_LOOP_REAP_CHILDREN = 0x2, + UV_LOOP_ENABLE_IO_URING_SQPOLL = 0x4 }; /* flags of excluding ifaddr */ @@ -243,6 +251,7 @@ int uv__close(int fd); /* preserves errno */ int uv__close_nocheckstdio(int fd); int uv__close_nocancel(int fd); int uv__socket(int domain, int type, int protocol); +int uv__sock_reuseport(int fd); ssize_t uv__recvmsg(int fd, struct msghdr *msg, int flags); void uv__make_close_pending(uv_handle_t* handle); int uv__getiovmax(void); @@ -287,6 +296,9 @@ int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb); int uv__tcp_nodelay(int fd, int on); int uv__tcp_keepalive(int fd, int on, unsigned int delay); +/* tty */ +void uv__tty_close(uv_tty_t* handle); + /* pipe */ int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb); @@ -332,6 +344,7 @@ int uv__random_sysctl(void* buf, size_t buflen); /* io_uring */ #ifdef __linux__ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req); +int uv__iou_fs_ftruncate(uv_loop_t* loop, uv_fs_t* req); int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, uv_fs_t* req, uint32_t fsync_flags); @@ -350,6 +363,7 @@ int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req); int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req); #else #define uv__iou_fs_close(loop, req) 0 +#define uv__iou_fs_ftruncate(loop, req) 0 #define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0 #define uv__iou_fs_link(loop, req) 0 #define uv__iou_fs_mkdir(loop, req) 0 @@ -472,4 +486,44 @@ uv__fs_copy_file_range(int fd_in, #define UV__CPU_AFFINITY_SUPPORTED 0 #endif +#ifdef __linux__ +typedef struct { + long long quota_per_period; + long long period_length; + double proportions; +} uv__cpu_constraint; + +int uv__get_constrained_cpu(uv__cpu_constraint* constraint); +#endif + +#if defined(__sun) && !defined(__illumos__) +#ifdef SO_FLOW_NAME +/* Since it's impossible to detect the Solaris 11.4 version via OS macros, + * so we check the presence of the socket option SO_FLOW_NAME that was first + * introduced to Solaris 11.4 and define a custom macro for determining 11.4. + */ +#define UV__SOLARIS_11_4 (1) +#else +#define UV__SOLARIS_11_4 (0) +#endif +#endif + +#if defined(EVFILT_USER) && defined(NOTE_TRIGGER) +/* EVFILT_USER is available since OS X 10.6, DragonFlyBSD 4.0, + * FreeBSD 8.1, and NetBSD 10.0. 
+ * + * Note that even though EVFILT_USER is defined on the current system, + * it may still fail to work at runtime somehow. In that case, we fall + * back to pipe-based signaling. + */ +#define UV__KQUEUE_EVFILT_USER 1 +/* Magic number of identifier used for EVFILT_USER during runtime detection. + * There are no Google hits for this number when I create it. That way, + * people will be directed here if this number gets printed due to some + * kqueue error and they google for help. */ +#define UV__KQUEUE_EVFILT_USER_IDENT 0x1e7e7711 +#else +#define UV__KQUEUE_EVFILT_USER 0 +#endif + #endif /* UV_UNIX_INTERNAL_H_ */ diff --git a/deps/uv/src/unix/kqueue.c b/deps/uv/src/unix/kqueue.c index 4d09edc06a0972..876b717086c609 100644 --- a/deps/uv/src/unix/kqueue.c +++ b/deps/uv/src/unix/kqueue.c @@ -99,6 +99,39 @@ int uv__io_fork(uv_loop_t* loop) { int uv__io_check_fd(uv_loop_t* loop, int fd) { struct kevent ev; int rc; + struct stat sb; +#ifdef __APPLE__ + char path[MAXPATHLEN]; +#endif + + if (uv__fstat(fd, &sb)) + return UV__ERR(errno); + + /* On FreeBSD, kqueue only supports EVFILT_READ notification for regular files + * and always reports ready events for writing, resulting in busy-looping. + * + * On Darwin, DragonFlyBSD, NetBSD and OpenBSD, kqueue reports ready events for + * regular files as readable and writable only once, acting like an EV_ONESHOT. + * + * Neither of the above cases should be added to the kqueue. + */ + if (S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode)) + return UV_EINVAL; + +#ifdef __APPLE__ + /* On Darwin (both macOS and iOS), in addition to regular files, FIFOs also don't + * work properly with kqueue: the disconnection from the last writer won't trigger + * an event for kqueue in spite of what the man pages say. Thus, we also disallow + * the case of S_IFIFO. */ + if (S_ISFIFO(sb.st_mode)) { + /* File descriptors of FIFO, pipe and kqueue share the same type of file, + * therefore there is no way to tell them apart via stat.st_mode&S_IFMT. + * Fortunately, FIFO is the only one that has a persisted file on filesystem, + * from which we're able to make the distinction for it. */ + if (!fcntl(fd, F_GETPATH, path)) + return UV_EINVAL; + } +#endif rc = 0; EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0); @@ -334,6 +367,17 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { continue; } +#if UV__KQUEUE_EVFILT_USER + if (ev->filter == EVFILT_USER) { + w = &loop->async_io_watcher; + assert(fd == w->fd); + uv__metrics_update_idle_time(loop); + w->cb(loop, w, w->events); + nevents++; + continue; + } +#endif + if (ev->filter == EVFILT_VNODE) { assert(w->events == POLLIN); assert(w->pevents == POLLIN); diff --git a/deps/uv/src/unix/linux.c b/deps/uv/src/unix/linux.c index 5a0800e112e4f9..803a9a9d3f04c9 100644 --- a/deps/uv/src/unix/linux.c +++ b/deps/uv/src/unix/linux.c @@ -126,6 +126,7 @@ enum { UV__IORING_SETUP_SQPOLL = 2u, + UV__IORING_SETUP_NO_SQARRAY = 0x10000u, }; enum { @@ -147,6 +148,7 @@ enum { UV__IORING_OP_MKDIRAT = 37, UV__IORING_OP_SYMLINKAT = 38, UV__IORING_OP_LINKAT = 39, + UV__IORING_OP_FTRUNCATE = 55, }; enum { @@ -159,10 +161,6 @@ enum { UV__IORING_SQ_CQ_OVERFLOW = 2u, }; -enum { - UV__MKDIRAT_SYMLINKAT_LINKAT = 1u, -}; - struct uv__io_cqring_offsets { uint32_t head; uint32_t tail; @@ -475,8 +473,16 @@ static int uv__use_io_uring(void) { use = atomic_load_explicit(&use_io_uring, memory_order_relaxed); if (use == 0) { - /* Disable io_uring by default due to CVE-2024-22017. 
*/ - use = -1; + use = uv__kernel_version() >= +#if defined(__hppa__) + /* io_uring first supported on parisc in 6.1, functional in .51 */ + /* https://lore.kernel.org/all/cb912694-b1fe-dbb0-4d8c-d608f3526905@gmx.de/ */ + /* 6.1.51 */ 0x060133 +#else + /* Older kernels have a bug where the sqpoll thread uses 100% CPU. */ + /* 5.10.186 */ 0x050ABA +#endif + ? 1 : -1; /* But users can still enable it if they so desire. */ val = getenv("UV_USE_IO_URING"); @@ -491,14 +497,6 @@ static int uv__use_io_uring(void) { } -UV_EXTERN int uv__node_patch_is_using_io_uring(void) { - // This function exists only in the modified copy of libuv in the Node.js - // repository. Node.js checks if this function exists and, if it does, uses it - // to determine whether libuv is using io_uring or not. - return uv__use_io_uring(); -} - - static void uv__iou_init(int epollfd, struct uv__iou* iou, uint32_t entries, @@ -509,10 +507,13 @@ static void uv__iou_init(int epollfd, size_t sqlen; size_t maxlen; size_t sqelen; + unsigned kernel_version; + uint32_t* sqarray; uint32_t i; char* sq; char* sqe; int ringfd; + int no_sqarray; sq = MAP_FAILED; sqe = MAP_FAILED; @@ -520,11 +521,15 @@ static void uv__iou_init(int epollfd, if (!uv__use_io_uring()) return; + kernel_version = uv__kernel_version(); + no_sqarray = + UV__IORING_SETUP_NO_SQARRAY * (kernel_version >= /* 6.6 */0x060600); + /* SQPOLL required CAP_SYS_NICE until linux v5.12 relaxed that requirement. * Mostly academic because we check for a v5.13 kernel afterwards anyway. */ memset(¶ms, 0, sizeof(params)); - params.flags = flags; + params.flags = flags | no_sqarray; if (flags & UV__IORING_SETUP_SQPOLL) params.sq_thread_idle = 10; /* milliseconds */ @@ -586,7 +591,6 @@ static void uv__iou_init(int epollfd, iou->sqhead = (uint32_t*) (sq + params.sq_off.head); iou->sqtail = (uint32_t*) (sq + params.sq_off.tail); iou->sqmask = *(uint32_t*) (sq + params.sq_off.ring_mask); - iou->sqarray = (uint32_t*) (sq + params.sq_off.array); iou->sqflags = (uint32_t*) (sq + params.sq_off.flags); iou->cqhead = (uint32_t*) (sq + params.cq_off.head); iou->cqtail = (uint32_t*) (sq + params.cq_off.tail); @@ -600,13 +604,13 @@ static void uv__iou_init(int epollfd, iou->sqelen = sqelen; iou->ringfd = ringfd; iou->in_flight = 0; - iou->flags = 0; - if (uv__kernel_version() >= /* 5.15.0 */ 0x050F00) - iou->flags |= UV__MKDIRAT_SYMLINKAT_LINKAT; + if (no_sqarray) + return; + sqarray = (uint32_t*) (sq + params.sq_off.array); for (i = 0; i <= iou->sqmask; i++) - iou->sqarray[i] = i; /* Slot -> sqe identity mapping. */ + sqarray[i] = i; /* Slot -> sqe identity mapping. 
*/ return; @@ -622,7 +626,7 @@ static void uv__iou_init(int epollfd, static void uv__iou_delete(struct uv__iou* iou) { - if (iou->ringfd != -1) { + if (iou->ringfd > -1) { munmap(iou->sq, iou->maxlen); munmap(iou->sqe, iou->sqelen); uv__close(iou->ringfd); @@ -636,7 +640,7 @@ int uv__platform_loop_init(uv_loop_t* loop) { lfields = uv__get_internal_fields(loop); lfields->ctl.ringfd = -1; - lfields->iou.ringfd = -1; + lfields->iou.ringfd = -2; /* "uninitialized" */ loop->inotify_watchers = NULL; loop->inotify_fd = -1; @@ -645,7 +649,6 @@ int uv__platform_loop_init(uv_loop_t* loop) { if (loop->backend_fd == -1) return UV__ERR(errno); - uv__iou_init(loop->backend_fd, &lfields->iou, 64, UV__IORING_SETUP_SQPOLL); uv__iou_init(loop->backend_fd, &lfields->ctl, 256, 0); return 0; @@ -713,23 +716,17 @@ void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { * This avoids a problem where the same file description remains open * in another process, causing repeated junk epoll events. * + * Perform EPOLL_CTL_DEL immediately instead of going through + * io_uring's submit queue, otherwise the file descriptor may + * be closed by the time the kernel starts the operation. + * * We pass in a dummy epoll_event, to work around a bug in old kernels. * * Work around a bug in kernels 3.10 to 3.19 where passing a struct that * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings. */ memset(&dummy, 0, sizeof(dummy)); - - if (inv == NULL) { - epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy); - } else { - uv__epoll_ctl_prep(loop->backend_fd, - &lfields->ctl, - inv->prep, - EPOLL_CTL_DEL, - fd, - &dummy); - } + epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy); } @@ -764,6 +761,23 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou, uint32_t mask; uint32_t slot; + /* Lazily create the ring. State machine: -2 means uninitialized, -1 means + * initialization failed. Anything else is a valid ring file descriptor. + */ + if (iou->ringfd == -2) { + /* By default, the SQPOLL is not created. Enable only if the loop is + * configured with UV_LOOP_USE_IO_URING_SQPOLL. 
+ */ + if ((loop->flags & UV_LOOP_ENABLE_IO_URING_SQPOLL) == 0) { + iou->ringfd = -1; + return NULL; + } + + uv__iou_init(loop->backend_fd, iou, 64, UV__IORING_SETUP_SQPOLL); + if (iou->ringfd == -2) + iou->ringfd = -1; /* "failed" */ + } + if (iou->ringfd == -1) return NULL; @@ -787,7 +801,7 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou, req->work_req.done = NULL; uv__queue_init(&req->work_req.wq); - uv__req_register(loop, req); + uv__req_register(loop); iou->in_flight++; return sqe; @@ -850,6 +864,26 @@ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req) { } +int uv__iou_fs_ftruncate(uv_loop_t* loop, uv_fs_t* req) { + struct uv__io_uring_sqe* sqe; + struct uv__iou* iou; + + if (uv__kernel_version() < /* 6.9 */0x060900) + return 0; + + iou = &uv__get_internal_fields(loop)->iou; + sqe = uv__iou_get_sqe(iou, loop, req); + if (sqe == NULL) + return 0; + + sqe->fd = req->file; + sqe->len = req->off; + sqe->opcode = UV__IORING_OP_FTRUNCATE; + uv__iou_submit(iou); + + return 1; +} + int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, uv_fs_t* req, uint32_t fsync_flags) { @@ -879,11 +913,10 @@ int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req) { struct uv__io_uring_sqe* sqe; struct uv__iou* iou; - iou = &uv__get_internal_fields(loop)->iou; - - if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT)) + if (uv__kernel_version() < /* 5.15.0 */0x050F00) return 0; + iou = &uv__get_internal_fields(loop)->iou; sqe = uv__iou_get_sqe(iou, loop, req); if (sqe == NULL) return 0; @@ -904,11 +937,10 @@ int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req) { struct uv__io_uring_sqe* sqe; struct uv__iou* iou; - iou = &uv__get_internal_fields(loop)->iou; - - if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT)) + if (uv__kernel_version() < /* 5.15.0 */0x050F00) return 0; + iou = &uv__get_internal_fields(loop)->iou; sqe = uv__iou_get_sqe(iou, loop, req); if (sqe == NULL) return 0; @@ -972,11 +1004,10 @@ int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req) { struct uv__io_uring_sqe* sqe; struct uv__iou* iou; - iou = &uv__get_internal_fields(loop)->iou; - - if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT)) + if (uv__kernel_version() < /* 5.15.0 */0x050F00) return 0; + iou = &uv__get_internal_fields(loop)->iou; sqe = uv__iou_get_sqe(iou, loop, req); if (sqe == NULL) return 0; @@ -1155,7 +1186,7 @@ static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) { req = (uv_fs_t*) (uintptr_t) e->user_data; assert(req->type == UV_FS); - uv__req_unregister(loop, req); + uv__req_unregister(loop); iou->in_flight--; /* If the op is not supported by the kernel retry using the thread pool */ @@ -1207,6 +1238,10 @@ static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) { } +/* Only for EPOLL_CTL_ADD and EPOLL_CTL_MOD. EPOLL_CTL_DEL should always be + * executed immediately, otherwise the file descriptor may have been closed + * by the time the kernel starts the operation. + */ static void uv__epoll_ctl_prep(int epollfd, struct uv__iou* ctl, struct epoll_event (*events)[256], @@ -1218,45 +1253,28 @@ static void uv__epoll_ctl_prep(int epollfd, uint32_t mask; uint32_t slot; - if (ctl->ringfd == -1) { - if (!epoll_ctl(epollfd, op, fd, e)) - return; - - if (op == EPOLL_CTL_DEL) - return; /* Ignore errors, may be racing with another thread. */ - - if (op != EPOLL_CTL_ADD) - abort(); - - if (errno != EEXIST) - abort(); - - /* File descriptor that's been watched before, update event mask. 
*/ - if (!epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, e)) - return; + assert(op == EPOLL_CTL_ADD || op == EPOLL_CTL_MOD); + assert(ctl->ringfd != -1); - abort(); - } else { - mask = ctl->sqmask; - slot = (*ctl->sqtail)++ & mask; + mask = ctl->sqmask; + slot = (*ctl->sqtail)++ & mask; - pe = &(*events)[slot]; - *pe = *e; + pe = &(*events)[slot]; + *pe = *e; - sqe = ctl->sqe; - sqe = &sqe[slot]; + sqe = ctl->sqe; + sqe = &sqe[slot]; - memset(sqe, 0, sizeof(*sqe)); - sqe->addr = (uintptr_t) pe; - sqe->fd = epollfd; - sqe->len = op; - sqe->off = fd; - sqe->opcode = UV__IORING_OP_EPOLL_CTL; - sqe->user_data = op | slot << 2 | (int64_t) fd << 32; + memset(sqe, 0, sizeof(*sqe)); + sqe->addr = (uintptr_t) pe; + sqe->fd = epollfd; + sqe->len = op; + sqe->off = fd; + sqe->opcode = UV__IORING_OP_EPOLL_CTL; + sqe->user_data = op | slot << 2 | (int64_t) fd << 32; - if ((*ctl->sqhead & mask) == (*ctl->sqtail & mask)) - uv__epoll_ctl_flush(epollfd, ctl, events); - } + if ((*ctl->sqhead & mask) == (*ctl->sqtail & mask)) + uv__epoll_ctl_flush(epollfd, ctl, events); } @@ -1396,9 +1414,29 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { w->events = w->pevents; e.events = w->pevents; + if (w == &loop->async_io_watcher) + /* Enable edge-triggered mode on async_io_watcher(eventfd), + * so that we're able to eliminate the overhead of reading + * the eventfd via system call on each event loop wakeup. + */ + e.events |= EPOLLET; e.data.fd = w->fd; + fd = w->fd; + + if (ctl->ringfd != -1) { + uv__epoll_ctl_prep(epollfd, ctl, &prep, op, fd, &e); + continue; + } + + if (!epoll_ctl(epollfd, op, fd, &e)) + continue; - uv__epoll_ctl_prep(epollfd, ctl, &prep, op, w->fd, &e); + assert(op == EPOLL_CTL_ADD); + assert(errno == EEXIST); + + /* File descriptor that's been watched before, update event mask. */ + if (epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &e)) + abort(); } inv.events = events; @@ -1486,8 +1524,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { * * Ignore all errors because we may be racing with another thread * when the file descriptor is closed. + * + * Perform EPOLL_CTL_DEL immediately instead of going through + * io_uring's submit queue, otherwise the file descriptor may + * be closed by the time the kernel starts the operation. 
*/ - uv__epoll_ctl_prep(epollfd, ctl, &prep, EPOLL_CTL_DEL, fd, pe); + epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, pe); continue; } @@ -1622,36 +1664,17 @@ uint64_t uv__hrtime(uv_clocktype_t type) { int uv_resident_set_memory(size_t* rss) { char buf[1024]; const char* s; - ssize_t n; long val; - int fd; + int rc; int i; - do - fd = open("/proc/self/stat", O_RDONLY); - while (fd == -1 && errno == EINTR); - - if (fd == -1) - return UV__ERR(errno); - - do - n = read(fd, buf, sizeof(buf) - 1); - while (n == -1 && errno == EINTR); - - uv__close(fd); - if (n == -1) - return UV__ERR(errno); - buf[n] = '\0'; - - s = strchr(buf, ' '); - if (s == NULL) - goto err; - - s += 1; - if (*s != '(') - goto err; + /* rss: 24th element */ + rc = uv__slurp("/proc/self/stat", buf, sizeof(buf)); + if (rc < 0) + return rc; - s = strchr(s, ')'); + /* find the last ')' */ + s = strrchr(buf, ')'); if (s == NULL) goto err; @@ -1663,9 +1686,7 @@ int uv_resident_set_memory(size_t* rss) { errno = 0; val = strtol(s, NULL, 10); - if (errno != 0) - goto err; - if (val < 0) + if (val < 0 || errno != 0) goto err; *rss = val * getpagesize(); @@ -2270,6 +2291,136 @@ uint64_t uv_get_available_memory(void) { } +static int uv__get_cgroupv2_constrained_cpu(const char* cgroup, + uv__cpu_constraint* constraint) { + char path[256]; + char buf[1024]; + unsigned int weight; + int cgroup_size; + const char* cgroup_trimmed; + char quota_buf[16]; + + if (strncmp(cgroup, "0::/", 4) != 0) + return UV_EINVAL; + + /* Trim ending \n by replacing it with a 0 */ + cgroup_trimmed = cgroup + sizeof("0::/") - 1; /* Skip the prefix "0::/" */ + cgroup_size = (int)strcspn(cgroup_trimmed, "\n"); /* Find the first slash */ + + /* Construct the path to the cpu.max file */ + snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.max", cgroup_size, + cgroup_trimmed); + + /* Read cpu.max */ + if (uv__slurp(path, buf, sizeof(buf)) < 0) + return UV_EIO; + + if (sscanf(buf, "%15s %llu", quota_buf, &constraint->period_length) != 2) + return UV_EINVAL; + + if (strncmp(quota_buf, "max", 3) == 0) + constraint->quota_per_period = LLONG_MAX; + else if (sscanf(quota_buf, "%lld", &constraint->quota_per_period) != 1) + return UV_EINVAL; // conversion failed + + /* Construct the path to the cpu.weight file */ + snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.weight", cgroup_size, + cgroup_trimmed); + + /* Read cpu.weight */ + if (uv__slurp(path, buf, sizeof(buf)) < 0) + return UV_EIO; + + if (sscanf(buf, "%u", &weight) != 1) + return UV_EINVAL; + + constraint->proportions = (double)weight / 100.0; + + return 0; +} + +static char* uv__cgroup1_find_cpu_controller(const char* cgroup, + int* cgroup_size) { + /* Seek to the cpu controller line. */ + char* cgroup_cpu = strstr(cgroup, ":cpu,"); + + if (cgroup_cpu != NULL) { + /* Skip the controller prefix to the start of the cgroup path. */ + cgroup_cpu += sizeof(":cpu,") - 1; + /* Determine the length of the cgroup path, excluding the newline. 
*/ + *cgroup_size = (int)strcspn(cgroup_cpu, "\n"); + } + + return cgroup_cpu; +} + +static int uv__get_cgroupv1_constrained_cpu(const char* cgroup, + uv__cpu_constraint* constraint) { + char path[256]; + char buf[1024]; + unsigned int shares; + int cgroup_size; + char* cgroup_cpu; + + cgroup_cpu = uv__cgroup1_find_cpu_controller(cgroup, &cgroup_size); + + if (cgroup_cpu == NULL) + return UV_EIO; + + /* Construct the path to the cpu.cfs_quota_us file */ + snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.cfs_quota_us", + cgroup_size, cgroup_cpu); + + if (uv__slurp(path, buf, sizeof(buf)) < 0) + return UV_EIO; + + if (sscanf(buf, "%lld", &constraint->quota_per_period) != 1) + return UV_EINVAL; + + /* Construct the path to the cpu.cfs_period_us file */ + snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.cfs_period_us", + cgroup_size, cgroup_cpu); + + /* Read cpu.cfs_period_us */ + if (uv__slurp(path, buf, sizeof(buf)) < 0) + return UV_EIO; + + if (sscanf(buf, "%lld", &constraint->period_length) != 1) + return UV_EINVAL; + + /* Construct the path to the cpu.shares file */ + snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.shares", cgroup_size, + cgroup_cpu); + + /* Read cpu.shares */ + if (uv__slurp(path, buf, sizeof(buf)) < 0) + return UV_EIO; + + if (sscanf(buf, "%u", &shares) != 1) + return UV_EINVAL; + + constraint->proportions = (double)shares / 1024.0; + + return 0; +} + +int uv__get_constrained_cpu(uv__cpu_constraint* constraint) { + char cgroup[1024]; + + /* Read the cgroup from /proc/self/cgroup */ + if (uv__slurp("/proc/self/cgroup", cgroup, sizeof(cgroup)) < 0) + return UV_EIO; + + /* Check if the system is using cgroup v2 by examining /proc/self/cgroup + * The entry for cgroup v2 is always in the format "0::$PATH" + * see https://docs.kernel.org/admin-guide/cgroup-v2.html */ + if (strncmp(cgroup, "0::/", 4) == 0) + return uv__get_cgroupv2_constrained_cpu(cgroup, constraint); + else + return uv__get_cgroupv1_constrained_cpu(cgroup, constraint); +} + + void uv_loadavg(double avg[3]) { struct sysinfo info; char buf[128]; /* Large enough to hold all of /proc/loadavg. */ diff --git a/deps/uv/src/unix/loop.c b/deps/uv/src/unix/loop.c index a9468e8e19cbed..179ee999d8052e 100644 --- a/deps/uv/src/unix/loop.c +++ b/deps/uv/src/unix/loop.c @@ -217,6 +217,14 @@ int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) { return 0; } +#if defined(__linux__) + if (option == UV_LOOP_USE_IO_URING_SQPOLL) { + loop->flags |= UV_LOOP_ENABLE_IO_URING_SQPOLL; + return 0; + } +#endif + + if (option != UV_LOOP_BLOCK_SIGNAL) return UV_ENOSYS; diff --git a/deps/uv/src/unix/pipe.c b/deps/uv/src/unix/pipe.c index fca364426f809e..1f9acfac41e9c5 100644 --- a/deps/uv/src/unix/pipe.c +++ b/deps/uv/src/unix/pipe.c @@ -76,8 +76,13 @@ int uv_pipe_bind2(uv_pipe_t* handle, if (name == NULL) return UV_EINVAL; + /* namelen==0 on Linux means autobind the listen socket in the abstract + * socket namespace, see `man 7 unix` for details. 
+ */ +#if !defined(__linux__) if (namelen == 0) return UV_EINVAL; +#endif if (includes_nul(name, namelen)) return UV_EINVAL; @@ -344,8 +349,15 @@ static int uv__pipe_getsockpeername(const uv_pipe_t* handle, uv__peersockfunc func, char* buffer, size_t* size) { +#if defined(__linux__) + static const int is_linux = 1; +#else + static const int is_linux = 0; +#endif struct sockaddr_un sa; socklen_t addrlen; + size_t slop; + char* p; int err; addrlen = sizeof(sa); @@ -359,17 +371,20 @@ static int uv__pipe_getsockpeername(const uv_pipe_t* handle, return err; } -#if defined(__linux__) - if (sa.sun_path[0] == 0) - /* Linux abstract namespace */ + slop = 1; + if (is_linux && sa.sun_path[0] == '\0') { + /* Linux abstract namespace. Not zero-terminated. */ + slop = 0; addrlen -= offsetof(struct sockaddr_un, sun_path); - else -#endif - addrlen = strlen(sa.sun_path); - + } else { + p = memchr(sa.sun_path, '\0', sizeof(sa.sun_path)); + if (p == NULL) + p = ARRAY_END(sa.sun_path); + addrlen = p - sa.sun_path; + } - if ((size_t)addrlen >= *size) { - *size = addrlen + 1; + if ((size_t)addrlen + slop > *size) { + *size = addrlen + slop; return UV_ENOBUFS; } @@ -487,7 +502,11 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) { int uv_pipe(uv_os_fd_t fds[2], int read_flags, int write_flags) { uv_os_fd_t temp[2]; int err; -#if defined(__FreeBSD__) || defined(__linux__) +#if defined(__linux__) || \ + defined(__FreeBSD__) || \ + defined(__OpenBSD__) || \ + defined(__DragonFly__) || \ + defined(__NetBSD__) int flags = O_CLOEXEC; if ((read_flags & UV_NONBLOCK_PIPE) && (write_flags & UV_NONBLOCK_PIPE)) diff --git a/deps/uv/src/unix/process.c b/deps/uv/src/unix/process.c index 4812a90f2f5047..f2038f2c0e823e 100644 --- a/deps/uv/src/unix/process.c +++ b/deps/uv/src/unix/process.c @@ -55,7 +55,8 @@ extern char **environ; #endif -#if defined(__linux__) +#if defined(__linux__) || \ + defined(__GNU__) # include #endif @@ -63,11 +64,7 @@ extern char **environ; # include "zos-base.h" #endif -#if defined(__APPLE__) || \ - defined(__DragonFly__) || \ - defined(__FreeBSD__) || \ - defined(__NetBSD__) || \ - defined(__OpenBSD__) +#ifdef UV_HAVE_KQUEUE #include #else #define UV_USE_SIGCHLD diff --git a/deps/uv/src/unix/signal.c b/deps/uv/src/unix/signal.c index bc4206e6d864c8..f23c887d0d6788 100644 --- a/deps/uv/src/unix/signal.c +++ b/deps/uv/src/unix/signal.c @@ -195,7 +195,7 @@ static void uv__signal_handler(int signum) { for (handle = uv__signal_first_handle(signum); handle != NULL && handle->signum == signum; - handle = RB_NEXT(uv__signal_tree_s, &uv__signal_tree, handle)) { + handle = RB_NEXT(uv__signal_tree_s, handle)) { int r; msg.signum = signum; diff --git a/deps/uv/src/unix/stream.c b/deps/uv/src/unix/stream.c index 28c4d5463c4622..18763b4744c30a 100644 --- a/deps/uv/src/unix/stream.c +++ b/deps/uv/src/unix/stream.c @@ -457,7 +457,7 @@ void uv__stream_destroy(uv_stream_t* stream) { assert(stream->flags & UV_HANDLE_CLOSED); if (stream->connect_req) { - uv__req_unregister(stream->loop, stream->connect_req); + uv__req_unregister(stream->loop); stream->connect_req->cb(stream->connect_req, UV_ECANCELED); stream->connect_req = NULL; } @@ -642,7 +642,7 @@ static void uv__drain(uv_stream_t* stream) { if ((stream->flags & UV_HANDLE_CLOSING) || !(stream->flags & UV_HANDLE_SHUT)) { stream->shutdown_req = NULL; - uv__req_unregister(stream->loop, req); + uv__req_unregister(stream->loop); err = 0; if (stream->flags & UV_HANDLE_CLOSING) @@ -698,7 +698,8 @@ static int uv__write_req_update(uv_stream_t* stream, do { len = n < buf->len 
? n : buf->len; - buf->base += len; + if (buf->len != 0) + buf->base += len; buf->len -= len; buf += (buf->len == 0); /* Advance to next buffer if this one is empty. */ n -= len; @@ -912,7 +913,7 @@ static void uv__write_callbacks(uv_stream_t* stream) { q = uv__queue_head(&pq); req = uv__queue_data(q, uv_write_t, queue); uv__queue_remove(q); - uv__req_unregister(stream->loop, req); + uv__req_unregister(stream->loop); if (req->bufs != NULL) { stream->write_queue_size -= uv__write_req_size(req); @@ -979,11 +980,13 @@ static int uv__stream_queue_fd(uv_stream_t* stream, int fd) { static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) { struct cmsghdr* cmsg; + char* p; + char* pe; int fd; int err; - size_t i; size_t count; + err = 0; for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (cmsg->cmsg_type != SCM_RIGHTS) { fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n", @@ -996,24 +999,26 @@ static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) { assert(count % sizeof(fd) == 0); count /= sizeof(fd); - for (i = 0; i < count; i++) { - memcpy(&fd, (char*) CMSG_DATA(cmsg) + i * sizeof(fd), sizeof(fd)); - /* Already has accepted fd, queue now */ - if (stream->accepted_fd != -1) { - err = uv__stream_queue_fd(stream, fd); - if (err != 0) { - /* Close rest */ - for (; i < count; i++) - uv__close(fd); - return err; - } - } else { - stream->accepted_fd = fd; + p = (void*) CMSG_DATA(cmsg); + pe = p + count * sizeof(fd); + + while (p < pe) { + memcpy(&fd, p, sizeof(fd)); + p += sizeof(fd); + + if (err == 0) { + if (stream->accepted_fd == -1) + stream->accepted_fd = fd; + else + err = uv__stream_queue_fd(stream, fd); } + + if (err != 0) + uv__close(fd); } } - return 0; + return err; } @@ -1268,7 +1273,7 @@ static void uv__stream_connect(uv_stream_t* stream) { return; stream->connect_req = NULL; - uv__req_unregister(stream->loop, req); + uv__req_unregister(stream->loop); if (error < 0 || uv__queue_empty(&stream->write_queue)) { uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); diff --git a/deps/uv/src/unix/tcp.c b/deps/uv/src/unix/tcp.c index 799fca77aa5392..98970d75278e31 100644 --- a/deps/uv/src/unix/tcp.c +++ b/deps/uv/src/unix/tcp.c @@ -167,6 +167,12 @@ int uv__tcp_bind(uv_tcp_t* tcp, if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on))) return UV__ERR(errno); + if (flags & UV_TCP_REUSEPORT) { + err = uv__sock_reuseport(tcp->io_watcher.fd); + if (err) + return err; + } + #ifndef __OpenBSD__ #ifdef IPV6_V6ONLY if (addr->sa_family == AF_INET6) { @@ -452,6 +458,14 @@ int uv__tcp_nodelay(int fd, int on) { } +#if (defined(UV__SOLARIS_11_4) && !UV__SOLARIS_11_4) || \ + (defined(__DragonFly__) && __DragonFly_version < 500702) +/* DragonFlyBSD <500702 and Solaris <11.4 require millisecond units + * for TCP keepalive options. 
*/ +#define UV_KEEPALIVE_FACTOR(x) (x *= 1000) +#else +#define UV_KEEPALIVE_FACTOR(x) +#endif int uv__tcp_keepalive(int fd, int on, unsigned int delay) { int idle; int intvl; @@ -467,8 +481,8 @@ int uv__tcp_keepalive(int fd, int on, unsigned int delay) { if (!on) return 0; - if (delay == 0) - return -1; + if (delay < 1) + return UV_EINVAL; #ifdef __sun /* The implementation of TCP keep-alive on Solaris/SmartOS is a bit unusual @@ -501,49 +515,53 @@ int uv__tcp_keepalive(int fd, int on, unsigned int delay) { if (idle > 10*24*60*60) idle = 10*24*60*60; + UV_KEEPALIVE_FACTOR(idle); + /* `TCP_KEEPIDLE`, `TCP_KEEPINTVL`, and `TCP_KEEPCNT` were not available on Solaris * until version 11.4, but let's take a chance here. */ #if defined(TCP_KEEPIDLE) && defined(TCP_KEEPINTVL) && defined(TCP_KEEPCNT) if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle))) return UV__ERR(errno); - intvl = idle/3; + intvl = 10; /* required at least 10 seconds */ + UV_KEEPALIVE_FACTOR(intvl); if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl))) return UV__ERR(errno); - cnt = 3; + cnt = 1; /* 1 retry, ensure (TCP_KEEPINTVL * TCP_KEEPCNT) is 10 seconds */ if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt))) return UV__ERR(errno); #else /* Fall back to the first implementation of tcp-alive mechanism for older Solaris, * simulate the tcp-alive mechanism on other platforms via `TCP_KEEPALIVE_THRESHOLD` + `TCP_KEEPALIVE_ABORT_THRESHOLD`. */ - idle *= 1000; /* kernel expects milliseconds */ if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_THRESHOLD, &idle, sizeof(idle))) return UV__ERR(errno); /* Note that the consequent probes will not be sent at equal intervals on Solaris, * but will be sent using the exponential backoff algorithm. */ - intvl = idle/3; - cnt = 3; - int time_to_abort = intvl * cnt; + int time_to_abort = 10; /* 10 seconds */ + UV_KEEPALIVE_FACTOR(time_to_abort); if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_ABORT_THRESHOLD, &time_to_abort, sizeof(time_to_abort))) return UV__ERR(errno); #endif #else /* !defined(__sun) */ + idle = delay; + UV_KEEPALIVE_FACTOR(idle); #ifdef TCP_KEEPIDLE - if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay))) + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle))) return UV__ERR(errno); #elif defined(TCP_KEEPALIVE) /* Darwin/macOS uses TCP_KEEPALIVE in place of TCP_KEEPIDLE. 
*/ - if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay))) + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &idle, sizeof(idle))) return UV__ERR(errno); #endif #ifdef TCP_KEEPINTVL - intvl = 1; /* 1 second; same as default on Win32 */ + intvl = 1; /* 1 second; same as default on Win32 */ + UV_KEEPALIVE_FACTOR(intvl); if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl))) return UV__ERR(errno); #endif @@ -612,7 +630,7 @@ void uv__tcp_close(uv_tcp_t* handle) { int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) { uv_os_sock_t temp[2]; int err; -#if defined(__FreeBSD__) || defined(__linux__) +#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC) int flags; flags = type | SOCK_CLOEXEC; diff --git a/deps/uv/src/unix/tty.c b/deps/uv/src/unix/tty.c index d099bdb3b67721..793054ba5a9bff 100644 --- a/deps/uv/src/unix/tty.c +++ b/deps/uv/src/unix/tty.c @@ -335,6 +335,37 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) { } +void uv__tty_close(uv_tty_t* handle) { + int expected; + int fd; + + fd = handle->io_watcher.fd; + if (fd == -1) + goto done; + + /* This is used for uv_tty_reset_mode() */ + do + expected = 0; + while (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1)); + + if (fd == orig_termios_fd) { + /* XXX(bnoordhuis) the tcsetattr is probably wrong when there are still + * other uv_tty_t handles active that refer to the same tty/pty but it's + * hard to recognize that particular situation without maintaining some + * kind of process-global data structure, and that still won't work in a + * multi-process setup. + */ + uv__tcsetattr(fd, TCSANOW, &orig_termios); + orig_termios_fd = -1; + } + + atomic_store(&termios_spinlock, 0); + +done: + uv__stream_close((uv_stream_t*) handle); +} + + int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) { struct winsize ws; int err; @@ -452,7 +483,7 @@ int uv_tty_reset_mode(void) { saved_errno = errno; if (atomic_exchange(&termios_spinlock, 1)) - return UV_EBUSY; /* In uv_tty_set_mode(). */ + return UV_EBUSY; /* In uv_tty_set_mode() or uv__tty_close(). 
*/ err = 0; if (orig_termios_fd != -1) diff --git a/deps/uv/src/unix/udp.c b/deps/uv/src/unix/udp.c index c2814512a5f507..f6640fc7231863 100644 --- a/deps/uv/src/unix/udp.c +++ b/deps/uv/src/unix/udp.c @@ -100,7 +100,7 @@ static void uv__udp_run_completed(uv_udp_t* handle) { uv__queue_remove(q); req = uv__queue_data(q, uv_udp_send_t, queue); - uv__req_unregister(handle->loop, req); + uv__req_unregister(handle->loop); handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs); handle->send_queue_count--; @@ -141,14 +141,14 @@ static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) { if (revents & POLLIN) uv__udp_recvmsg(handle); - if (revents & POLLOUT) { + if (revents & POLLOUT && !uv__is_closing(handle)) { uv__udp_sendmsg(handle); uv__udp_run_completed(handle); } } static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) { -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) struct sockaddr_in6 peers[20]; struct iovec iov[ARRAY_SIZE(peers)]; struct mmsghdr msgs[ARRAY_SIZE(peers)]; @@ -173,11 +173,18 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) { msgs[k].msg_hdr.msg_control = NULL; msgs[k].msg_hdr.msg_controllen = 0; msgs[k].msg_hdr.msg_flags = 0; + msgs[k].msg_len = 0; } +#if defined(__APPLE__) + do + nread = recvmsg_x(handle->io_watcher.fd, msgs, chunks, MSG_DONTWAIT); + while (nread == -1 && errno == EINTR); +#else do nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL); while (nread == -1 && errno == EINTR); +#endif if (nread < 1) { if (nread == 0 || errno == EAGAIN || errno == EWOULDBLOCK) @@ -204,9 +211,9 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) { handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE); } return nread; -#else /* __linux__ || ____FreeBSD__ */ +#else /* __linux__ || ____FreeBSD__ || __APPLE__ */ return UV_ENOSYS; -#endif /* __linux__ || ____FreeBSD__ */ +#endif /* __linux__ || ____FreeBSD__ || __APPLE__ */ } static void uv__udp_recvmsg(uv_udp_t* handle) { @@ -275,8 +282,61 @@ static void uv__udp_recvmsg(uv_udp_t* handle) { && handle->recv_cb != NULL); } -static void uv__udp_sendmsg(uv_udp_t* handle) { -#if defined(__linux__) || defined(__FreeBSD__) +static void uv__udp_sendmsg_one(uv_udp_t* handle, uv_udp_send_t* req) { + struct uv__queue* q; + struct msghdr h; + ssize_t size; + + for (;;) { + memset(&h, 0, sizeof h); + if (req->addr.ss_family == AF_UNSPEC) { + h.msg_name = NULL; + h.msg_namelen = 0; + } else { + h.msg_name = &req->addr; + if (req->addr.ss_family == AF_INET6) + h.msg_namelen = sizeof(struct sockaddr_in6); + else if (req->addr.ss_family == AF_INET) + h.msg_namelen = sizeof(struct sockaddr_in); + else if (req->addr.ss_family == AF_UNIX) + h.msg_namelen = sizeof(struct sockaddr_un); + else { + assert(0 && "unsupported address family"); + abort(); + } + } + h.msg_iov = (struct iovec*) req->bufs; + h.msg_iovlen = req->nbufs; + + do + size = sendmsg(handle->io_watcher.fd, &h, 0); + while (size == -1 && errno == EINTR); + + if (size == -1) + if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) + return; + + req->status = (size == -1 ? UV__ERR(errno) : size); + + /* Sending a datagram is an atomic operation: either all data + * is written or nothing is (and EMSGSIZE is raised). That is + * why we don't handle partial writes. Just pop the request + * off the write queue and onto the completed queue, done. 
+ */ + uv__queue_remove(&req->queue); + uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); + uv__io_feed(handle->loop, &handle->io_watcher); + + if (uv__queue_empty(&handle->write_queue)) + return; + + q = uv__queue_head(&handle->write_queue); + req = uv__queue_data(q, uv_udp_send_t, queue); + } +} + +#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) +static void uv__udp_sendmsg_many(uv_udp_t* handle) { uv_udp_send_t* req; struct mmsghdr h[20]; struct mmsghdr* p; @@ -285,16 +345,11 @@ static void uv__udp_sendmsg(uv_udp_t* handle) { size_t pkts; size_t i; - if (uv__queue_empty(&handle->write_queue)) - return; - write_queue_drain: for (pkts = 0, q = uv__queue_head(&handle->write_queue); pkts < ARRAY_SIZE(h) && q != &handle->write_queue; ++pkts, q = uv__queue_head(q)) { - assert(q != NULL); req = uv__queue_data(q, uv_udp_send_t, queue); - assert(req != NULL); p = &h[pkts]; memset(p, 0, sizeof(*p)); @@ -318,9 +373,15 @@ static void uv__udp_sendmsg(uv_udp_t* handle) { h[pkts].msg_hdr.msg_iovlen = req->nbufs; } +#if defined(__APPLE__) + do + npkts = sendmsg_x(handle->io_watcher.fd, h, pkts, MSG_DONTWAIT); + while (npkts == -1 && errno == EINTR); +#else do npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0); while (npkts == -1 && errno == EINTR); +#endif if (npkts < 1) { if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) @@ -328,10 +389,7 @@ static void uv__udp_sendmsg(uv_udp_t* handle) { for (i = 0, q = uv__queue_head(&handle->write_queue); i < pkts && q != &handle->write_queue; ++i, q = uv__queue_head(&handle->write_queue)) { - assert(q != NULL); req = uv__queue_data(q, uv_udp_send_t, queue); - assert(req != NULL); - req->status = UV__ERR(errno); uv__queue_remove(&req->queue); uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); @@ -346,10 +404,7 @@ static void uv__udp_sendmsg(uv_udp_t* handle) { for (i = 0, q = uv__queue_head(&handle->write_queue); i < (size_t)npkts && q != &handle->write_queue; ++i, q = uv__queue_head(&handle->write_queue)) { - assert(q != NULL); req = uv__queue_data(q, uv_udp_send_t, queue); - assert(req != NULL); - req->status = req->bufs[0].len; /* Sending a datagram is an atomic operation: either all data @@ -364,75 +419,48 @@ static void uv__udp_sendmsg(uv_udp_t* handle) { /* couldn't batch everything, continue sending (jump to avoid stack growth) */ if (!uv__queue_empty(&handle->write_queue)) goto write_queue_drain; - uv__io_feed(handle->loop, &handle->io_watcher); -#else /* __linux__ || ____FreeBSD__ */ - uv_udp_send_t* req; - struct msghdr h; - struct uv__queue* q; - ssize_t size; - while (!uv__queue_empty(&handle->write_queue)) { - q = uv__queue_head(&handle->write_queue); - assert(q != NULL); - - req = uv__queue_data(q, uv_udp_send_t, queue); - assert(req != NULL); + uv__io_feed(handle->loop, &handle->io_watcher); +} +#endif /* __linux__ || ____FreeBSD__ || __APPLE__ */ - memset(&h, 0, sizeof h); - if (req->addr.ss_family == AF_UNSPEC) { - h.msg_name = NULL; - h.msg_namelen = 0; - } else { - h.msg_name = &req->addr; - if (req->addr.ss_family == AF_INET6) - h.msg_namelen = sizeof(struct sockaddr_in6); - else if (req->addr.ss_family == AF_INET) - h.msg_namelen = sizeof(struct sockaddr_in); - else if (req->addr.ss_family == AF_UNIX) - h.msg_namelen = sizeof(struct sockaddr_un); - else { - assert(0 && "unsupported address family"); - abort(); - } - } - h.msg_iov = (struct iovec*) req->bufs; - h.msg_iovlen = req->nbufs; +static void uv__udp_sendmsg(uv_udp_t* handle) { + struct uv__queue* q; + uv_udp_send_t* 
req; - do { - size = sendmsg(handle->io_watcher.fd, &h, 0); - } while (size == -1 && errno == EINTR); + if (uv__queue_empty(&handle->write_queue)) + return; - if (size == -1) { - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) - break; - } + q = uv__queue_head(&handle->write_queue); + req = uv__queue_data(q, uv_udp_send_t, queue); - req->status = (size == -1 ? UV__ERR(errno) : size); +#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) + /* Use sendmmsg() if this send request contains more than one datagram OR + * there is more than one send request (because that automatically implies + * there is more than one datagram.) + */ + if (req->nbufs != 1 || &handle->write_queue != uv__queue_next(&req->queue)) + return uv__udp_sendmsg_many(handle); +#endif - /* Sending a datagram is an atomic operation: either all data - * is written or nothing is (and EMSGSIZE is raised). That is - * why we don't handle partial writes. Just pop the request - * off the write queue and onto the completed queue, done. - */ - uv__queue_remove(&req->queue); - uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); - uv__io_feed(handle->loop, &handle->io_watcher); - } -#endif /* __linux__ || ____FreeBSD__ */ + return uv__udp_sendmsg_one(handle, req); } /* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional - * refinements for programs that use multicast. + * refinements for programs that use multicast. Therefore we preferentially + * set SO_REUSEPORT over SO_REUSEADDR here, but we set SO_REUSEPORT only + * when that socket option doesn't have the capability of load balancing. + * Otherwise, we fall back to SO_REUSEADDR. * - * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that - * are different from the BSDs: it _shares_ the port rather than steal it - * from the current listener. While useful, it's not something we can emulate - * on other platforms so we don't enable it. + * Linux as of 3.9, DragonflyBSD 3.6, AIX 7.2.5 have the SO_REUSEPORT socket + * option but with semantics that are different from the BSDs: it _shares_ + * the port rather than steals it from the current listener. While useful, + * it's not something we can emulate on other platforms so we don't enable it. * * zOS does not support getsockname with SO_REUSEPORT option when using * AF_UNIX. */ -static int uv__set_reuse(int fd) { +static int uv__sock_reuseaddr(int fd) { int yes; yes = 1; @@ -449,7 +477,7 @@ static int uv__set_reuse(int fd) { return UV__ERR(errno); } #elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) && \ - !defined(__sun__) + !defined(__sun__) && !defined(__DragonFly__) && !defined(_AIX73) if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes))) return UV__ERR(errno); #else @@ -492,7 +520,8 @@ int uv__udp_bind(uv_udp_t* handle, int fd; /* Check for bad flags. */ - if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR | UV_UDP_LINUX_RECVERR)) + if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR | + UV_UDP_REUSEPORT | UV_UDP_LINUX_RECVERR)) return UV_EINVAL; /* Cannot set IPv6-only mode on non-IPv6 socket. 
*/ @@ -515,7 +544,13 @@ int uv__udp_bind(uv_udp_t* handle, } if (flags & UV_UDP_REUSEADDR) { - err = uv__set_reuse(fd); + err = uv__sock_reuseaddr(fd); + if (err) + return err; + } + + if (flags & UV_UDP_REUSEPORT) { + err = uv__sock_reuseport(fd); if (err) return err; } @@ -722,7 +757,7 @@ int uv__udp_send(uv_udp_send_t* req, req->bufs = uv__malloc(nbufs * sizeof(bufs[0])); if (req->bufs == NULL) { - uv__req_unregister(handle->loop, req); + uv__req_unregister(handle->loop); return UV_ENOMEM; } @@ -1015,7 +1050,7 @@ int uv__udp_init_ex(uv_loop_t* loop, int uv_udp_using_recvmmsg(const uv_udp_t* handle) { -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) if (handle->flags & UV_HANDLE_UDP_RECVMMSG) return 1; #endif @@ -1037,7 +1072,7 @@ int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) { if (err) return err; - err = uv__set_reuse(sock); + err = uv__sock_reuseaddr(sock); if (err) return err; diff --git a/deps/uv/src/uv-common.h b/deps/uv/src/uv-common.h index cd57e5a35153d0..4baede2e506ee1 100644 --- a/deps/uv/src/uv-common.h +++ b/deps/uv/src/uv-common.h @@ -233,13 +233,13 @@ void uv__threadpool_cleanup(void); #define uv__has_active_reqs(loop) \ ((loop)->active_reqs.count > 0) -#define uv__req_register(loop, req) \ +#define uv__req_register(loop) \ do { \ (loop)->active_reqs.count++; \ } \ while (0) -#define uv__req_unregister(loop, req) \ +#define uv__req_unregister(loop) \ do { \ assert(uv__has_active_reqs(loop)); \ (loop)->active_reqs.count--; \ @@ -349,7 +349,7 @@ void uv__threadpool_cleanup(void); #define uv__req_init(loop, req, typ) \ do { \ UV_REQ_INIT(req, typ); \ - uv__req_register(loop, req); \ + uv__req_register(loop); \ } \ while (0) @@ -400,7 +400,6 @@ void uv__metrics_set_provider_entry_time(uv_loop_t* loop); struct uv__iou { uint32_t* sqhead; uint32_t* sqtail; - uint32_t* sqarray; uint32_t sqmask; uint32_t* sqflags; uint32_t* cqhead; @@ -415,7 +414,6 @@ struct uv__iou { size_t sqelen; int ringfd; uint32_t in_flight; - uint32_t flags; }; #endif /* __linux__ */ diff --git a/deps/uv/src/win/error.c b/deps/uv/src/win/error.c index 3a269da87a948b..58587c5fb785ea 100644 --- a/deps/uv/src/win/error.c +++ b/deps/uv/src/win/error.c @@ -78,6 +78,7 @@ int uv_translate_sys_error(int sys_errno) { case WSAEADDRNOTAVAIL: return UV_EADDRNOTAVAIL; case WSAEAFNOSUPPORT: return UV_EAFNOSUPPORT; case WSAEWOULDBLOCK: return UV_EAGAIN; + case ERROR_NO_DATA: return UV_EAGAIN; case WSAEALREADY: return UV_EALREADY; case ERROR_INVALID_FLAGS: return UV_EBADF; case ERROR_INVALID_HANDLE: return UV_EBADF; @@ -157,7 +158,6 @@ int uv_translate_sys_error(int sys_errno) { case ERROR_ACCESS_DENIED: return UV_EPERM; case ERROR_PRIVILEGE_NOT_HELD: return UV_EPERM; case ERROR_BAD_PIPE: return UV_EPIPE; - case ERROR_NO_DATA: return UV_EPIPE; case ERROR_PIPE_NOT_CONNECTED: return UV_EPIPE; case WSAESHUTDOWN: return UV_EPIPE; case WSAEPROTONOSUPPORT: return UV_EPROTONOSUPPORT; @@ -168,6 +168,16 @@ int uv_translate_sys_error(int sys_errno) { case ERROR_INVALID_FUNCTION: return UV_EISDIR; case ERROR_META_EXPANSION_TOO_LONG: return UV_E2BIG; case WSAESOCKTNOSUPPORT: return UV_ESOCKTNOSUPPORT; + case ERROR_BAD_EXE_FORMAT: return UV_EFTYPE; default: return UV_UNKNOWN; } } + +int uv_translate_write_sys_error(int sys_errno) { + switch (sys_errno) { + case ERROR_BROKEN_PIPE: return UV_EPIPE; + case ERROR_NO_DATA: return UV_EPIPE; + default: + return uv_translate_sys_error(sys_errno); + } +} diff --git a/deps/uv/src/win/fs-event.c 
b/deps/uv/src/win/fs-event.c index fce411813e9370..7ab407e05345f9 100644 --- a/deps/uv/src/win/fs-event.c +++ b/deps/uv/src/win/fs-event.c @@ -561,7 +561,25 @@ void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req, } } else { err = GET_REQ_ERROR(req); - handle->cb(handle, NULL, 0, uv_translate_sys_error(err)); + /* + * Check whether the ERROR_ACCESS_DENIED is caused by the watched directory + * being actually deleted (not an actual error) or a legit error. Retrieve + * FileStandardInfo to check whether the directory is pending deletion. + */ + FILE_STANDARD_INFO info; + if (err == ERROR_ACCESS_DENIED && + handle->dirw != NULL && + GetFileInformationByHandleEx(handle->dir_handle, + FileStandardInfo, + &info, + sizeof(info)) && + info.Directory && + info.DeletePending) { + uv__convert_utf16_to_utf8(handle->dirw, -1, &filename); + handle->cb(handle, filename, UV_RENAME, 0); + } else { + handle->cb(handle, NULL, 0, uv_translate_sys_error(err)); + } } if (handle->flags & UV_HANDLE_CLOSING) { diff --git a/deps/uv/src/win/fs.c b/deps/uv/src/win/fs.c index b73c17d8c1c831..08b42eb14c972a 100644 --- a/deps/uv/src/win/fs.c +++ b/deps/uv/src/win/fs.c @@ -46,6 +46,17 @@ #define UV_FS_FREE_PTR 0x0008 #define UV_FS_CLEANEDUP 0x0010 +#ifndef FILE_DISPOSITION_DELETE +#define FILE_DISPOSITION_DELETE 0x0001 +#endif /* FILE_DISPOSITION_DELETE */ + +#ifndef FILE_DISPOSITION_POSIX_SEMANTICS +#define FILE_DISPOSITION_POSIX_SEMANTICS 0x0002 +#endif /* FILE_DISPOSITION_POSIX_SEMANTICS */ + +#ifndef FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE +#define FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE 0x0010 +#endif /* FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE */ #define INIT(subtype) \ do { \ @@ -58,7 +69,7 @@ #define POST \ do { \ if (cb != NULL) { \ - uv__req_register(loop, req); \ + uv__req_register(loop); \ uv__work_submit(loop, \ &req->work_req, \ UV__WORK_FAST_IO, \ @@ -97,13 +108,14 @@ return; \ } -#define MILLION ((int64_t) 1000 * 1000) -#define BILLION ((int64_t) 1000 * 1000 * 1000) +#define NSEC_PER_TICK 100 +#define TICKS_PER_SEC ((int64_t) 1e9 / NSEC_PER_TICK) +static const int64_t WIN_TO_UNIX_TICK_OFFSET = 11644473600 * TICKS_PER_SEC; static void uv__filetime_to_timespec(uv_timespec_t *ts, int64_t filetime) { - filetime -= 116444736 * BILLION; - ts->tv_sec = (long) (filetime / (10 * MILLION)); - ts->tv_nsec = (long) ((filetime - ts->tv_sec * 10 * MILLION) * 100U); + filetime -= WIN_TO_UNIX_TICK_OFFSET; + ts->tv_sec = filetime / TICKS_PER_SEC; + ts->tv_nsec = (filetime % TICKS_PER_SEC) * NSEC_PER_TICK; if (ts->tv_nsec < 0) { ts->tv_sec -= 1; ts->tv_nsec += 1e9; @@ -112,7 +124,7 @@ static void uv__filetime_to_timespec(uv_timespec_t *ts, int64_t filetime) { #define TIME_T_TO_FILETIME(time, filetime_ptr) \ do { \ - int64_t bigtime = ((time) * 10 * MILLION + 116444736 * BILLION); \ + int64_t bigtime = ((time) * TICKS_PER_SEC + WIN_TO_UNIX_TICK_OFFSET); \ (filetime_ptr)->dwLowDateTime = (uint64_t) bigtime & 0xFFFFFFFF; \ (filetime_ptr)->dwHighDateTime = (uint64_t) bigtime >> 32; \ } while(0) @@ -136,6 +148,16 @@ static int uv__file_symlink_usermode_flag = SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGE static DWORD uv__allocation_granularity; +typedef enum { + FS__STAT_PATH_SUCCESS, + FS__STAT_PATH_ERROR, + FS__STAT_PATH_TRY_SLOW +} fs__stat_path_return_t; + +INLINE static void fs__stat_assign_statbuf_null(uv_stat_t* statbuf); +INLINE static void fs__stat_assign_statbuf(uv_stat_t* statbuf, + FILE_STAT_BASIC_INFORMATION stat_info, int do_lstat); + void uv__fs_init(void) { SYSTEM_INFO system_info; @@ -1056,27 +1078,20 @@ 
void fs__write(uv_fs_t* req) { error = ERROR_INVALID_FLAGS; } - SET_REQ_WIN32_ERROR(req, error); + SET_REQ_UV_ERROR(req, uv_translate_write_sys_error(error), error); } } -void fs__rmdir(uv_fs_t* req) { - int result = _wrmdir(req->file.pathw); - if (result == -1) - SET_REQ_WIN32_ERROR(req, _doserrno); - else - SET_REQ_RESULT(req, 0); -} - - -void fs__unlink(uv_fs_t* req) { +static void fs__unlink_rmdir(uv_fs_t* req, BOOL isrmdir) { const WCHAR* pathw = req->file.pathw; HANDLE handle; BY_HANDLE_FILE_INFORMATION info; FILE_DISPOSITION_INFORMATION disposition; + FILE_DISPOSITION_INFORMATION_EX disposition_ex; IO_STATUS_BLOCK iosb; NTSTATUS status; + DWORD error; handle = CreateFileW(pathw, FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | DELETE, @@ -1097,10 +1112,18 @@ void fs__unlink(uv_fs_t* req) { return; } - if (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { - /* Do not allow deletion of directories, unless it is a symlink. When the - * path refers to a non-symlink directory, report EPERM as mandated by - * POSIX.1. */ + if (isrmdir && !(info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) { + /* Error if we're in rmdir mode but it is not a dir. + * TODO: change it to UV_NOTDIR in v2. */ + SET_REQ_UV_ERROR(req, UV_ENOENT, ERROR_DIRECTORY); + CloseHandle(handle); + return; + } + + if (!isrmdir && (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) { + /* If not explicitly allowed, do not allow deletion of directories, unless + * it is a symlink. When the path refers to a non-symlink directory, report + * EPERM as mandated by POSIX.1. */ /* Check if it is a reparse point. If it's not, it's a normal directory. */ if (!(info.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) { @@ -1112,7 +1135,7 @@ void fs__unlink(uv_fs_t* req) { /* Read the reparse point and check if it is a valid symlink. If not, don't * unlink. */ if (fs__readlink_handle(handle, NULL, NULL) < 0) { - DWORD error = GetLastError(); + error = GetLastError(); if (error == ERROR_SYMLINK_NOT_SUPPORTED) error = ERROR_ACCESS_DENIED; SET_REQ_WIN32_ERROR(req, error); @@ -1121,42 +1144,77 @@ void fs__unlink(uv_fs_t* req) { } } - if (info.dwFileAttributes & FILE_ATTRIBUTE_READONLY) { - /* Remove read-only attribute */ - FILE_BASIC_INFORMATION basic = { 0 }; - - basic.FileAttributes = (info.dwFileAttributes & ~FILE_ATTRIBUTE_READONLY) | - FILE_ATTRIBUTE_ARCHIVE; - - status = pNtSetInformationFile(handle, - &iosb, - &basic, - sizeof basic, - FileBasicInformation); - if (!NT_SUCCESS(status)) { - SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status)); - CloseHandle(handle); - return; - } - } + /* Try posix delete first */ + disposition_ex.Flags = FILE_DISPOSITION_DELETE | FILE_DISPOSITION_POSIX_SEMANTICS | + FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE; - /* Try to set the delete flag. */ - disposition.DeleteFile = TRUE; status = pNtSetInformationFile(handle, &iosb, - &disposition, - sizeof disposition, - FileDispositionInformation); + &disposition_ex, + sizeof disposition_ex, + FileDispositionInformationEx); if (NT_SUCCESS(status)) { SET_REQ_SUCCESS(req); } else { - SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status)); + /* If status == STATUS_CANNOT_DELETE here, given we set + * FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE, STATUS_CANNOT_DELETE can only mean + * that there is an existing mapped view to the file, preventing delete. 
+ * STATUS_CANNOT_DELETE maps to UV_EACCES so it's not specifically worth handling */ + error = pRtlNtStatusToDosError(status); + if (error == ERROR_NOT_SUPPORTED /* filesystem does not support posix deletion */ || + error == ERROR_INVALID_PARAMETER /* pre Windows 10 error */ || + error == ERROR_INVALID_FUNCTION /* pre Windows 10 1607 error */) { + /* posix delete not supported so try fallback */ + if (info.dwFileAttributes & FILE_ATTRIBUTE_READONLY) { + /* Remove read-only attribute */ + FILE_BASIC_INFORMATION basic = { 0 }; + + basic.FileAttributes = (info.dwFileAttributes & ~FILE_ATTRIBUTE_READONLY) | + FILE_ATTRIBUTE_ARCHIVE; + + status = pNtSetInformationFile(handle, + &iosb, + &basic, + sizeof basic, + FileBasicInformation); + if (!NT_SUCCESS(status)) { + SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status)); + CloseHandle(handle); + return; + } + } + + /* Try to set the delete flag. */ + disposition.DeleteFile = TRUE; + status = pNtSetInformationFile(handle, + &iosb, + &disposition, + sizeof disposition, + FileDispositionInformation); + if (NT_SUCCESS(status)) { + SET_REQ_SUCCESS(req); + } else { + SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status)); + } + } else { + SET_REQ_WIN32_ERROR(req, error); + } } CloseHandle(handle); } +static void fs__rmdir(uv_fs_t* req) { + fs__unlink_rmdir(req, /*isrmdir*/1); +} + + +static void fs__unlink(uv_fs_t* req) { + fs__unlink_rmdir(req, /*isrmdir*/0); +} + + void fs__mkdir(uv_fs_t* req) { /* TODO: use req->mode. */ if (CreateDirectoryW(req->file.pathw, NULL)) { @@ -1182,7 +1240,7 @@ void fs__mktemp(uv_fs_t* req, uv__fs_mktemp_func func) { size_t len; uint64_t v; char* path; - + path = (char*)req->path; len = wcslen(req->file.pathw); ep = req->file.pathw + len; @@ -1593,12 +1651,12 @@ void fs__readdir(uv_fs_t* req) { goto error; /* Copy file type. */ - if ((find_data->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) - dent.d_type = UV__DT_DIR; + if ((find_data->dwFileAttributes & FILE_ATTRIBUTE_DEVICE) != 0) + dent.d_type = UV__DT_CHAR; else if ((find_data->dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0) dent.d_type = UV__DT_LINK; - else if ((find_data->dwFileAttributes & FILE_ATTRIBUTE_DEVICE) != 0) - dent.d_type = UV__DT_CHAR; + else if ((find_data->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) + dent.d_type = UV__DT_DIR; else dent.d_type = UV__DT_FILE; @@ -1627,6 +1685,43 @@ void fs__closedir(uv_fs_t* req) { SET_REQ_RESULT(req, 0); } +INLINE static fs__stat_path_return_t fs__stat_path(WCHAR* path, + uv_stat_t* statbuf, int do_lstat) { + FILE_STAT_BASIC_INFORMATION stat_info; + + // Check if the new fast API is available. + if (!pGetFileInformationByName) { + return FS__STAT_PATH_TRY_SLOW; + } + + // Check if the API call fails. + if (!pGetFileInformationByName(path, FileStatBasicByNameInfo, &stat_info, + sizeof(stat_info))) { + switch(GetLastError()) { + case ERROR_FILE_NOT_FOUND: + case ERROR_PATH_NOT_FOUND: + case ERROR_NOT_READY: + case ERROR_BAD_NET_NAME: + /* These errors aren't worth retrying with the slow path. */ + return FS__STAT_PATH_ERROR; + } + return FS__STAT_PATH_TRY_SLOW; + } + + // A file handle is needed to get st_size for links. 
+ if ((stat_info.FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) { + return FS__STAT_PATH_TRY_SLOW; + } + + if (stat_info.DeviceType == FILE_DEVICE_NULL) { + fs__stat_assign_statbuf_null(statbuf); + return FS__STAT_PATH_SUCCESS; + } + + fs__stat_assign_statbuf(statbuf, stat_info, do_lstat); + return FS__STAT_PATH_SUCCESS; +} + INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, int do_lstat) { size_t target_length = 0; @@ -1635,6 +1730,7 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, FILE_FS_VOLUME_INFORMATION volume_info; NTSTATUS nt_status; IO_STATUS_BLOCK io_status; + FILE_STAT_BASIC_INFORMATION stat_info; nt_status = pNtQueryVolumeInformationFile(handle, &io_status, @@ -1650,13 +1746,7 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, /* If it's NUL device set fields as reasonable as possible and return. */ if (device_info.DeviceType == FILE_DEVICE_NULL) { - memset(statbuf, 0, sizeof(uv_stat_t)); - statbuf->st_mode = _S_IFCHR; - statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) | - ((_S_IREAD | _S_IWRITE) >> 6); - statbuf->st_nlink = 1; - statbuf->st_blksize = 4096; - statbuf->st_rdev = FILE_DEVICE_NULL << 16; + fs__stat_assign_statbuf_null(statbuf); return 0; } @@ -1680,14 +1770,65 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, /* Buffer overflow (a warning status code) is expected here. */ if (io_status.Status == STATUS_NOT_IMPLEMENTED) { - statbuf->st_dev = 0; + stat_info.VolumeSerialNumber.QuadPart = 0; } else if (NT_ERROR(nt_status)) { SetLastError(pRtlNtStatusToDosError(nt_status)); return -1; } else { - statbuf->st_dev = volume_info.VolumeSerialNumber; + stat_info.VolumeSerialNumber.QuadPart = volume_info.VolumeSerialNumber; + } + + stat_info.DeviceType = device_info.DeviceType; + stat_info.FileAttributes = file_info.BasicInformation.FileAttributes; + stat_info.NumberOfLinks = file_info.StandardInformation.NumberOfLinks; + stat_info.FileId.QuadPart = + file_info.InternalInformation.IndexNumber.QuadPart; + stat_info.ChangeTime.QuadPart = + file_info.BasicInformation.ChangeTime.QuadPart; + stat_info.CreationTime.QuadPart = + file_info.BasicInformation.CreationTime.QuadPart; + stat_info.LastAccessTime.QuadPart = + file_info.BasicInformation.LastAccessTime.QuadPart; + stat_info.LastWriteTime.QuadPart = + file_info.BasicInformation.LastWriteTime.QuadPart; + stat_info.AllocationSize.QuadPart = + file_info.StandardInformation.AllocationSize.QuadPart; + + if (do_lstat && + (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) { + /* + * If reading the link fails, the reparse point is not a symlink and needs + * to be treated as a regular file. The higher level lstat function will + * detect this failure and retry without do_lstat if appropriate. 
+ */ + if (fs__readlink_handle(handle, NULL, &target_length) != 0) { + fs__stat_assign_statbuf(statbuf, stat_info, do_lstat); + return -1; + } + stat_info.EndOfFile.QuadPart = target_length; + } else { + stat_info.EndOfFile.QuadPart = + file_info.StandardInformation.EndOfFile.QuadPart; } + fs__stat_assign_statbuf(statbuf, stat_info, do_lstat); + return 0; +} + +INLINE static void fs__stat_assign_statbuf_null(uv_stat_t* statbuf) { + memset(statbuf, 0, sizeof(uv_stat_t)); + statbuf->st_mode = _S_IFCHR; + statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) | + ((_S_IREAD | _S_IWRITE) >> 6); + statbuf->st_nlink = 1; + statbuf->st_blksize = 4096; + statbuf->st_rdev = FILE_DEVICE_NULL << 16; +} + +INLINE static void fs__stat_assign_statbuf(uv_stat_t* statbuf, + FILE_STAT_BASIC_INFORMATION stat_info, int do_lstat) { + statbuf->st_dev = stat_info.VolumeSerialNumber.QuadPart; + /* Todo: st_mode should probably always be 0666 for everyone. We might also * want to report 0777 if the file is a .exe or a directory. * @@ -1719,50 +1860,43 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, * target. Otherwise, reparse points must be treated as regular files. */ if (do_lstat && - (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) { - /* - * If reading the link fails, the reparse point is not a symlink and needs - * to be treated as a regular file. The higher level lstat function will - * detect this failure and retry without do_lstat if appropriate. - */ - if (fs__readlink_handle(handle, NULL, &target_length) != 0) - return -1; + (stat_info.FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) { statbuf->st_mode |= S_IFLNK; - statbuf->st_size = target_length; + statbuf->st_size = stat_info.EndOfFile.QuadPart; } if (statbuf->st_mode == 0) { - if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_DIRECTORY) { + if (stat_info.FileAttributes & FILE_ATTRIBUTE_DIRECTORY) { statbuf->st_mode |= _S_IFDIR; statbuf->st_size = 0; } else { statbuf->st_mode |= _S_IFREG; - statbuf->st_size = file_info.StandardInformation.EndOfFile.QuadPart; + statbuf->st_size = stat_info.EndOfFile.QuadPart; } } - if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_READONLY) + if (stat_info.FileAttributes & FILE_ATTRIBUTE_READONLY) statbuf->st_mode |= _S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6); else statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) | ((_S_IREAD | _S_IWRITE) >> 6); uv__filetime_to_timespec(&statbuf->st_atim, - file_info.BasicInformation.LastAccessTime.QuadPart); + stat_info.LastAccessTime.QuadPart); uv__filetime_to_timespec(&statbuf->st_ctim, - file_info.BasicInformation.ChangeTime.QuadPart); + stat_info.ChangeTime.QuadPart); uv__filetime_to_timespec(&statbuf->st_mtim, - file_info.BasicInformation.LastWriteTime.QuadPart); + stat_info.LastWriteTime.QuadPart); uv__filetime_to_timespec(&statbuf->st_birthtim, - file_info.BasicInformation.CreationTime.QuadPart); + stat_info.CreationTime.QuadPart); - statbuf->st_ino = file_info.InternalInformation.IndexNumber.QuadPart; + statbuf->st_ino = stat_info.FileId.QuadPart; /* st_blocks contains the on-disk allocation size in 512-byte units. 
*/ statbuf->st_blocks = - (uint64_t) file_info.StandardInformation.AllocationSize.QuadPart >> 9; + (uint64_t) stat_info.AllocationSize.QuadPart >> 9; - statbuf->st_nlink = file_info.StandardInformation.NumberOfLinks; + statbuf->st_nlink = stat_info.NumberOfLinks; /* The st_blksize is supposed to be the 'optimal' number of bytes for reading * and writing to the disk. That is, for any definition of 'optimal' - it's @@ -1794,8 +1928,6 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, statbuf->st_uid = 0; statbuf->st_rdev = 0; statbuf->st_gen = 0; - - return 0; } @@ -1817,6 +1949,17 @@ INLINE static DWORD fs__stat_impl_from_path(WCHAR* path, DWORD flags; DWORD ret; + // If new API exists, try to use it. + switch (fs__stat_path(path, statbuf, do_lstat)) { + case FS__STAT_PATH_SUCCESS: + return 0; + case FS__STAT_PATH_ERROR: + return GetLastError(); + case FS__STAT_PATH_TRY_SLOW: + break; + } + + // If the new API does not exist, use the old API. flags = FILE_FLAG_BACKUP_SEMANTICS; if (do_lstat) flags |= FILE_FLAG_OPEN_REPARSE_POINT; @@ -2830,7 +2973,7 @@ static void uv__fs_done(struct uv__work* w, int status) { uv_fs_t* req; req = container_of(w, uv_fs_t, work_req); - uv__req_unregister(req->loop, req); + uv__req_unregister(req->loop); if (status == UV_ECANCELED) { assert(req->result == 0); diff --git a/deps/uv/src/win/getaddrinfo.c b/deps/uv/src/win/getaddrinfo.c index 8b8406ada8e743..f20e10d49d974a 100644 --- a/deps/uv/src/win/getaddrinfo.c +++ b/deps/uv/src/win/getaddrinfo.c @@ -71,10 +71,9 @@ int uv__getaddrinfo_translate_error(int sys_err) { DECLSPEC_IMPORT void WSAAPI FreeAddrInfoW(PADDRINFOW pAddrInfo); #endif - -/* Adjust size value to be multiple of 4. Use to keep pointer aligned. - * Do we need different versions of this for different architectures? */ -#define ALIGNED_SIZE(X) ((((X) + 3) >> 2) << 2) +static size_t align_offset(size_t off, size_t alignment) { + return ((off + alignment - 1) / alignment) * alignment; +} #ifndef NDIS_IF_MAX_STRING_SIZE #define NDIS_IF_MAX_STRING_SIZE IF_MAX_STRING_SIZE @@ -103,17 +102,7 @@ static void uv__getaddrinfo_work(struct uv__work* w) { * Each size calculation is adjusted to avoid unaligned pointers. */ static void uv__getaddrinfo_done(struct uv__work* w, int status) { - uv_getaddrinfo_t* req; - size_t addrinfo_len = 0; - ssize_t name_len = 0; - size_t addrinfo_struct_len = ALIGNED_SIZE(sizeof(struct addrinfo)); - struct addrinfoW* addrinfow_ptr; - struct addrinfo* addrinfo_ptr; - char* alloc_ptr = NULL; - char* cur_ptr = NULL; - int r; - - req = container_of(w, uv_getaddrinfo_t, work_req); + uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req); /* release input parameter memory */ uv__free(req->alloc); @@ -126,34 +115,44 @@ static void uv__getaddrinfo_done(struct uv__work* w, int status) { } if (req->retcode == 0) { + char* alloc_ptr = NULL; + size_t cur_off = 0; + size_t addrinfo_len; /* Convert addrinfoW to addrinfo. First calculate required length. */ - addrinfow_ptr = req->addrinfow; + struct addrinfoW* addrinfow_ptr = req->addrinfow; while (addrinfow_ptr != NULL) { - addrinfo_len += addrinfo_struct_len + - ALIGNED_SIZE(addrinfow_ptr->ai_addrlen); + cur_off = align_offset(cur_off, sizeof(void*)); + cur_off += sizeof(struct addrinfo); + /* TODO: This alignment could be smaller, if we could + portably get the alignment for sockaddr. 
*/ + cur_off = align_offset(cur_off, sizeof(void*)); + cur_off += addrinfow_ptr->ai_addrlen; if (addrinfow_ptr->ai_canonname != NULL) { - name_len = uv_utf16_length_as_wtf8(addrinfow_ptr->ai_canonname, -1); + ssize_t name_len = + uv_utf16_length_as_wtf8(addrinfow_ptr->ai_canonname, -1); if (name_len < 0) { req->retcode = name_len; goto complete; } - addrinfo_len += ALIGNED_SIZE(name_len + 1); + cur_off += name_len + 1; } addrinfow_ptr = addrinfow_ptr->ai_next; } /* allocate memory for addrinfo results */ - alloc_ptr = (char*)uv__malloc(addrinfo_len); + addrinfo_len = cur_off; + alloc_ptr = uv__malloc(addrinfo_len); /* do conversions */ if (alloc_ptr != NULL) { - cur_ptr = alloc_ptr; + struct addrinfo *addrinfo_ptr = (struct addrinfo *)alloc_ptr; + cur_off = 0; addrinfow_ptr = req->addrinfow; - while (addrinfow_ptr != NULL) { + for (;;) { + cur_off += sizeof(struct addrinfo); + assert(cur_off <= addrinfo_len); /* copy addrinfo struct data */ - assert(cur_ptr + addrinfo_struct_len <= alloc_ptr + addrinfo_len); - addrinfo_ptr = (struct addrinfo*)cur_ptr; addrinfo_ptr->ai_family = addrinfow_ptr->ai_family; addrinfo_ptr->ai_socktype = addrinfow_ptr->ai_socktype; addrinfo_ptr->ai_protocol = addrinfow_ptr->ai_protocol; @@ -163,35 +162,37 @@ static void uv__getaddrinfo_done(struct uv__work* w, int status) { addrinfo_ptr->ai_addr = NULL; addrinfo_ptr->ai_next = NULL; - cur_ptr += addrinfo_struct_len; - /* copy sockaddr */ if (addrinfo_ptr->ai_addrlen > 0) { - assert(cur_ptr + addrinfo_ptr->ai_addrlen <= - alloc_ptr + addrinfo_len); - memcpy(cur_ptr, addrinfow_ptr->ai_addr, addrinfo_ptr->ai_addrlen); - addrinfo_ptr->ai_addr = (struct sockaddr*)cur_ptr; - cur_ptr += ALIGNED_SIZE(addrinfo_ptr->ai_addrlen); + cur_off = align_offset(cur_off, sizeof(void *)); + addrinfo_ptr->ai_addr = (struct sockaddr *)(alloc_ptr + cur_off); + cur_off += addrinfo_ptr->ai_addrlen; + assert(cur_off <= addrinfo_len); + memcpy(addrinfo_ptr->ai_addr, + addrinfow_ptr->ai_addr, + addrinfo_ptr->ai_addrlen); } /* convert canonical name to UTF-8 */ if (addrinfow_ptr->ai_canonname != NULL) { - name_len = alloc_ptr + addrinfo_len - cur_ptr; - r = uv__copy_utf16_to_utf8(addrinfow_ptr->ai_canonname, - -1, - cur_ptr, - (size_t*)&name_len); + ssize_t name_len = addrinfo_len - cur_off; + addrinfo_ptr->ai_canonname = alloc_ptr + cur_off; + int r = uv__copy_utf16_to_utf8(addrinfow_ptr->ai_canonname, + -1, + addrinfo_ptr->ai_canonname, + (size_t*)&name_len); assert(r == 0); - addrinfo_ptr->ai_canonname = cur_ptr; - cur_ptr += ALIGNED_SIZE(name_len + 1); + cur_off += name_len + 1; + assert(cur_off <= addrinfo_len); } - assert(cur_ptr <= alloc_ptr + addrinfo_len); /* set next ptr */ addrinfow_ptr = addrinfow_ptr->ai_next; - if (addrinfow_ptr != NULL) { - addrinfo_ptr->ai_next = (struct addrinfo*)cur_ptr; - } + if (addrinfow_ptr == NULL) + break; + cur_off = align_offset(cur_off, sizeof(void *)); + addrinfo_ptr = (struct addrinfo *)(alloc_ptr + cur_off); + addrinfo_ptr->ai_next = addrinfo_ptr; } req->addrinfo = (struct addrinfo*)alloc_ptr; } else { @@ -206,7 +207,7 @@ static void uv__getaddrinfo_done(struct uv__work* w, int status) { } complete: - uv__req_unregister(req->loop, req); + uv__req_unregister(req->loop); /* finally do callback with converted result */ if (req->getaddrinfo_cb) @@ -242,10 +243,12 @@ int uv_getaddrinfo(uv_loop_t* loop, const char* service, const struct addrinfo* hints) { char hostname_ascii[256]; + size_t off = 0; size_t nodesize = 0; size_t servicesize = 0; + size_t serviceoff = 0; size_t hintssize = 0; - char* 
alloc_ptr = NULL; + size_t hintoff = 0; ssize_t rc; if (req == NULL || (node == NULL && service == NULL)) { @@ -268,6 +271,7 @@ int uv_getaddrinfo(uv_loop_t* loop, return rc; nodesize = strlen(hostname_ascii) + 1; node = hostname_ascii; + off += nodesize * sizeof(WCHAR); } if (service != NULL) { @@ -275,27 +279,28 @@ int uv_getaddrinfo(uv_loop_t* loop, if (rc < 0) return rc; servicesize = rc; + off = align_offset(off, sizeof(WCHAR)); + serviceoff = off; + off += servicesize * sizeof(WCHAR); } + if (hints != NULL) { - hintssize = ALIGNED_SIZE(sizeof(struct addrinfoW)); + off = align_offset(off, sizeof(void *)); + hintoff = off; + hintssize = sizeof(struct addrinfoW); + off += hintssize; } /* allocate memory for inputs, and partition it as needed */ - alloc_ptr = uv__malloc(ALIGNED_SIZE(nodesize * sizeof(WCHAR)) + - ALIGNED_SIZE(servicesize * sizeof(WCHAR)) + - hintssize); - if (!alloc_ptr) + req->alloc = uv__malloc(off); + if (!req->alloc) return UV_ENOMEM; - /* save alloc_ptr now so we can free if error */ - req->alloc = (void*) alloc_ptr; - /* Convert node string to UTF16 into allocated memory and save pointer in the * request. The node here has been converted to ascii. */ if (node != NULL) { - req->node = (WCHAR*) alloc_ptr; - uv_wtf8_to_utf16(node, (WCHAR*) alloc_ptr, nodesize); - alloc_ptr += ALIGNED_SIZE(nodesize * sizeof(WCHAR)); + req->node = (WCHAR*) req->alloc; + uv_wtf8_to_utf16(node, req->node, nodesize); } else { req->node = NULL; } @@ -303,16 +308,15 @@ int uv_getaddrinfo(uv_loop_t* loop, /* Convert service string to UTF16 into allocated memory and save pointer in * the req. */ if (service != NULL) { - req->service = (WCHAR*) alloc_ptr; - uv_wtf8_to_utf16(service, (WCHAR*) alloc_ptr, servicesize); - alloc_ptr += ALIGNED_SIZE(servicesize * sizeof(WCHAR)); + req->service = (WCHAR*) ((char*) req->alloc + serviceoff); + uv_wtf8_to_utf16(service, req->service, servicesize); } else { req->service = NULL; } /* copy hints to allocated memory and save pointer in req */ if (hints != NULL) { - req->addrinfow = (struct addrinfoW*) alloc_ptr; + req->addrinfow = (struct addrinfoW*) ((char*) req->alloc + hintoff); req->addrinfow->ai_family = hints->ai_family; req->addrinfow->ai_socktype = hints->ai_socktype; req->addrinfow->ai_protocol = hints->ai_protocol; @@ -325,7 +329,7 @@ int uv_getaddrinfo(uv_loop_t* loop, req->addrinfow = NULL; } - uv__req_register(loop, req); + uv__req_register(loop); if (getaddrinfo_cb) { uv__work_submit(loop, diff --git a/deps/uv/src/win/getnameinfo.c b/deps/uv/src/win/getnameinfo.c index 32863176ef6403..695549580d2d81 100644 --- a/deps/uv/src/win/getnameinfo.c +++ b/deps/uv/src/win/getnameinfo.c @@ -82,7 +82,7 @@ static void uv__getnameinfo_done(struct uv__work* w, int status) { char* service; req = container_of(w, uv_getnameinfo_t, work_req); - uv__req_unregister(req->loop, req); + uv__req_unregister(req->loop); host = service = NULL; if (status == UV_ECANCELED) { @@ -124,7 +124,7 @@ int uv_getnameinfo(uv_loop_t* loop, } UV_REQ_INIT(req, UV_GETNAMEINFO); - uv__req_register(loop, req); + uv__req_register(loop); req->getnameinfo_cb = getnameinfo_cb; req->flags = flags; diff --git a/deps/uv/src/win/internal.h b/deps/uv/src/win/internal.h index 867dea5e0ed34f..be408af6661026 100644 --- a/deps/uv/src/win/internal.h +++ b/deps/uv/src/win/internal.h @@ -330,4 +330,6 @@ void uv__wake_all_loops(void); */ void uv__init_detect_system_wakeup(void); +int uv_translate_write_sys_error(int sys_errno); + #endif /* UV_WIN_INTERNAL_H_ */ diff --git a/deps/uv/src/win/pipe.c 
b/deps/uv/src/win/pipe.c index 3c8abe1c28ce36..d46ecb9fc702e6 100644 --- a/deps/uv/src/win/pipe.c +++ b/deps/uv/src/win/pipe.c @@ -106,8 +106,8 @@ static int includes_nul(const char *s, size_t n) { } -static void uv__unique_pipe_name(char* ptr, char* name, size_t size) { - snprintf(name, size, "\\\\?\\pipe\\uv\\%p-%lu", ptr, GetCurrentProcessId()); +static void uv__unique_pipe_name(unsigned long long ptr, char* name, size_t size) { + snprintf(name, size, "\\\\?\\pipe\\uv\\%llu-%lu", ptr, GetCurrentProcessId()); } @@ -208,7 +208,7 @@ static void close_pipe(uv_pipe_t* pipe) { static int uv__pipe_server( HANDLE* pipeHandle_ptr, DWORD access, - char* name, size_t nameSize, char* random) { + char* name, size_t nameSize, unsigned long long random) { HANDLE pipeHandle; int err; @@ -249,7 +249,7 @@ static int uv__pipe_server( static int uv__create_pipe_pair( HANDLE* server_pipe_ptr, HANDLE* client_pipe_ptr, unsigned int server_flags, unsigned int client_flags, - int inherit_client, char* random) { + int inherit_client, unsigned long long random) { /* allowed flags are: UV_READABLE_PIPE | UV_WRITABLE_PIPE | UV_NONBLOCK_PIPE */ char pipe_name[64]; SECURITY_ATTRIBUTES sa; @@ -357,7 +357,12 @@ int uv_pipe(uv_file fds[2], int read_flags, int write_flags) { /* TODO: better source of local randomness than &fds? */ read_flags |= UV_READABLE_PIPE; write_flags |= UV_WRITABLE_PIPE; - err = uv__create_pipe_pair(&readh, &writeh, read_flags, write_flags, 0, (char*) &fds[0]); + err = uv__create_pipe_pair(&readh, + &writeh, + read_flags, + write_flags, + 0, + (uintptr_t) &fds[0]); if (err != 0) return err; temp[0] = _open_osfhandle((intptr_t) readh, 0); @@ -421,7 +426,7 @@ int uv__create_stdio_pipe_pair(uv_loop_t* loop, } err = uv__create_pipe_pair(&server_pipe, &client_pipe, - server_flags, client_flags, 1, (char*) server_pipe); + server_flags, client_flags, 1, (uintptr_t) server_pipe); if (err) goto error; @@ -667,15 +672,10 @@ void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) { } handle->pipe.conn.ipc_xfer_queue_length = 0; - if (handle->flags & UV_HANDLE_EMULATE_IOCP) { - if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) { - UnregisterWait(handle->read_req.wait_handle); - handle->read_req.wait_handle = INVALID_HANDLE_VALUE; - } - if (handle->read_req.event_handle != NULL) { - CloseHandle(handle->read_req.event_handle); - handle->read_req.event_handle = NULL; - } + assert(handle->read_req.wait_handle == INVALID_HANDLE_VALUE); + if (handle->read_req.event_handle != NULL) { + CloseHandle(handle->read_req.event_handle); + handle->read_req.event_handle = NULL; } if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) @@ -868,7 +868,7 @@ void uv_pipe_connect(uv_connect_t* req, SET_REQ_ERROR(req, err); uv__insert_pending_req(loop, (uv_req_t*) req); handle->reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); } } @@ -959,7 +959,7 @@ int uv_pipe_connect2(uv_connect_t* req, goto error; } - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); handle->reqs_pending++; return 0; @@ -974,7 +974,7 @@ int uv_pipe_connect2(uv_connect_t* req, SET_REQ_SUCCESS(req); uv__insert_pending_req(loop, (uv_req_t*) req); handle->reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); return 0; error: @@ -992,7 +992,7 @@ int uv_pipe_connect2(uv_connect_t* req, SET_REQ_ERROR(req, err); uv__insert_pending_req(loop, (uv_req_t*) req); handle->reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, 
handle); return 0; } @@ -1417,13 +1417,12 @@ static void uv__pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) { } if (handle->flags & UV_HANDLE_EMULATE_IOCP) { - if (req->wait_handle == INVALID_HANDLE_VALUE) { - if (!RegisterWaitForSingleObject(&req->wait_handle, - req->event_handle, post_completion_read_wait, (void*) req, - INFINITE, WT_EXECUTEINWAITTHREAD)) { - SET_REQ_ERROR(req, GetLastError()); - goto error; - } + assert(req->wait_handle == INVALID_HANDLE_VALUE); + if (!RegisterWaitForSingleObject(&req->wait_handle, + req->event_handle, post_completion_read_wait, (void*) req, + INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) { + SET_REQ_ERROR(req, GetLastError()); + goto error; } } } @@ -1451,16 +1450,16 @@ int uv__pipe_read_start(uv_pipe_t* handle, handle->read_cb = read_cb; handle->alloc_cb = alloc_cb; + if (handle->read_req.event_handle == NULL) { + handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL); + if (handle->read_req.event_handle == NULL) { + uv_fatal_error(GetLastError(), "CreateEvent"); + } + } + /* If reading was stopped and then started again, there could still be a read * request pending. */ if (!(handle->flags & UV_HANDLE_READ_PENDING)) { - if (handle->flags & UV_HANDLE_EMULATE_IOCP && - handle->read_req.event_handle == NULL) { - handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL); - if (handle->read_req.event_handle == NULL) { - uv_fatal_error(GetLastError(), "CreateEvent"); - } - } uv__pipe_queue_read(loop, handle); } @@ -1638,7 +1637,7 @@ static int uv__pipe_write_data(uv_loop_t* loop, req->u.io.queued_bytes = 0; } - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); handle->reqs_pending++; handle->stream.conn.write_reqs_pending++; POST_COMPLETION_FOR_REQ(loop, req); @@ -1686,7 +1685,7 @@ static int uv__pipe_write_data(uv_loop_t* loop, CloseHandle(req->event_handle); req->event_handle = NULL; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); handle->reqs_pending++; handle->stream.conn.write_reqs_pending++; return 0; @@ -1713,13 +1712,13 @@ static int uv__pipe_write_data(uv_loop_t* loop, if (handle->flags & UV_HANDLE_EMULATE_IOCP) { if (!RegisterWaitForSingleObject(&req->wait_handle, req->event_handle, post_completion_write_wait, (void*) req, - INFINITE, WT_EXECUTEINWAITTHREAD)) { + INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) { return GetLastError(); } } } - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); handle->reqs_pending++; handle->stream.conn.write_reqs_pending++; @@ -1889,7 +1888,7 @@ static void uv__pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error, static void uv__pipe_read_error_or_eof(uv_loop_t* loop, uv_pipe_t* handle, - int error, uv_buf_t buf) { + DWORD error, uv_buf_t buf) { if (error == ERROR_BROKEN_PIPE) { uv__pipe_read_eof(loop, handle, buf); } else { @@ -1919,17 +1918,25 @@ static void uv__pipe_queue_ipc_xfer_info( /* Read an exact number of bytes from a pipe. If an error or end-of-file is * encountered before the requested number of bytes are read, an error is * returned. 
*/ -static int uv__pipe_read_exactly(HANDLE h, void* buffer, DWORD count) { - DWORD bytes_read, bytes_read_now; +static DWORD uv__pipe_read_exactly(uv_pipe_t* handle, void* buffer, DWORD count) { + uv_read_t* req; + DWORD bytes_read; + DWORD bytes_read_now; bytes_read = 0; while (bytes_read < count) { - if (!ReadFile(h, + req = &handle->read_req; + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + req->u.io.overlapped.hEvent = (HANDLE) ((uintptr_t) req->event_handle | 1); + if (!ReadFile(handle->handle, (char*) buffer + bytes_read, count - bytes_read, &bytes_read_now, - NULL)) { - return GetLastError(); + &req->u.io.overlapped)) { + if (GetLastError() != ERROR_IO_PENDING) + return GetLastError(); + if (!GetOverlappedResult(handle->handle, &req->u.io.overlapped, &bytes_read_now, TRUE)) + return GetLastError(); } bytes_read += bytes_read_now; @@ -1940,16 +1947,19 @@ static int uv__pipe_read_exactly(HANDLE h, void* buffer, DWORD count) { } -static DWORD uv__pipe_read_data(uv_loop_t* loop, - uv_pipe_t* handle, - DWORD suggested_bytes, - DWORD max_bytes) { - DWORD bytes_read; +static int uv__pipe_read_data(uv_loop_t* loop, + uv_pipe_t* handle, + DWORD* bytes_read, /* inout argument */ + DWORD max_bytes) { uv_buf_t buf; + uv_read_t* req; + DWORD r; + DWORD bytes_available; + int more; /* Ask the user for a buffer to read data into. */ buf = uv_buf_init(NULL, 0); - handle->alloc_cb((uv_handle_t*) handle, suggested_bytes, &buf); + handle->alloc_cb((uv_handle_t*) handle, *bytes_read, &buf); if (buf.base == NULL || buf.len == 0) { handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf); return 0; /* Break out of read loop. */ @@ -1958,33 +1968,77 @@ static DWORD uv__pipe_read_data(uv_loop_t* loop, /* Ensure we read at most the smaller of: * (a) the length of the user-allocated buffer. * (b) the maximum data length as specified by the `max_bytes` argument. + * (c) the amount of data that can be read non-blocking */ if (max_bytes > buf.len) max_bytes = buf.len; - /* Read into the user buffer. */ - if (!ReadFile(handle->handle, buf.base, max_bytes, &bytes_read, NULL)) { - uv__pipe_read_error_or_eof(loop, handle, GetLastError(), buf); - return 0; /* Break out of read loop. */ + if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) { + /* The user failed to supply a pipe that can be used non-blocking or with + * threads. Try to estimate the amount of data that is safe to read without + * blocking, in a race-y way however. */ + bytes_available = 0; + if (!PeekNamedPipe(handle->handle, NULL, 0, NULL, &bytes_available, NULL)) { + r = GetLastError(); + } else { + if (max_bytes > bytes_available) + max_bytes = bytes_available; + *bytes_read = 0; + if (max_bytes == 0 || ReadFile(handle->handle, buf.base, max_bytes, bytes_read, NULL)) + r = ERROR_SUCCESS; + else + r = GetLastError(); + } + more = max_bytes < bytes_available; + } else { + /* Read into the user buffer. + * Prepare an Event so that we can cancel if it doesn't complete immediately. 
+ */ + req = &handle->read_req; + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + req->u.io.overlapped.hEvent = (HANDLE) ((uintptr_t) req->event_handle | 1); + if (ReadFile(handle->handle, buf.base, max_bytes, bytes_read, &req->u.io.overlapped)) { + r = ERROR_SUCCESS; + } else { + r = GetLastError(); + *bytes_read = 0; + if (r == ERROR_IO_PENDING) { + r = CancelIoEx(handle->handle, &req->u.io.overlapped); + assert(r || GetLastError() == ERROR_NOT_FOUND); + if (GetOverlappedResult(handle->handle, &req->u.io.overlapped, bytes_read, TRUE)) { + r = ERROR_SUCCESS; + } else { + r = GetLastError(); + *bytes_read = 0; + } + } + } + more = *bytes_read == max_bytes; } /* Call the read callback. */ - handle->read_cb((uv_stream_t*) handle, bytes_read, &buf); + if (r == ERROR_SUCCESS || r == ERROR_OPERATION_ABORTED) + handle->read_cb((uv_stream_t*) handle, *bytes_read, &buf); + else + uv__pipe_read_error_or_eof(loop, handle, r, buf); - return bytes_read; + return more; } -static DWORD uv__pipe_read_ipc(uv_loop_t* loop, uv_pipe_t* handle) { - uint32_t* data_remaining = &handle->pipe.conn.ipc_data_frame.payload_remaining; - int err; +static int uv__pipe_read_ipc(uv_loop_t* loop, uv_pipe_t* handle) { + uint32_t* data_remaining; + DWORD err; + DWORD more; + DWORD bytes_read; + + data_remaining = &handle->pipe.conn.ipc_data_frame.payload_remaining; if (*data_remaining > 0) { /* Read frame data payload. */ - DWORD bytes_read = - uv__pipe_read_data(loop, handle, *data_remaining, *data_remaining); + bytes_read = *data_remaining; + more = uv__pipe_read_data(loop, handle, &bytes_read, bytes_read); *data_remaining -= bytes_read; - return bytes_read; } else { /* Start of a new IPC frame. */ @@ -1995,7 +2049,7 @@ static DWORD uv__pipe_read_ipc(uv_loop_t* loop, uv_pipe_t* handle) { /* Read the IPC frame header. */ err = uv__pipe_read_exactly( - handle->handle, &frame_header, sizeof frame_header); + handle, &frame_header, sizeof frame_header); if (err) goto error; @@ -2031,21 +2085,24 @@ static DWORD uv__pipe_read_ipc(uv_loop_t* loop, uv_pipe_t* handle) { /* If no socket xfer info follows, return here. Data will be read in a * subsequent invocation of uv__pipe_read_ipc(). */ - if (xfer_type == UV__IPC_SOCKET_XFER_NONE) - return sizeof frame_header; /* Number of bytes read. */ - - /* Read transferred socket information. */ - err = uv__pipe_read_exactly(handle->handle, &xfer_info, sizeof xfer_info); - if (err) - goto error; - - /* Store the pending socket info. */ - uv__pipe_queue_ipc_xfer_info(handle, xfer_type, &xfer_info); + if (xfer_type != UV__IPC_SOCKET_XFER_NONE) { + /* Read transferred socket information. */ + err = uv__pipe_read_exactly(handle, &xfer_info, sizeof xfer_info); + if (err) + goto error; - /* Return number of bytes read. */ - return sizeof frame_header + sizeof xfer_info; + /* Store the pending socket info. */ + uv__pipe_queue_ipc_xfer_info(handle, xfer_type, &xfer_info); + } } + /* Return whether the caller should immediately try another read call to get + * more data. Calling uv__pipe_read_exactly will hang if there isn't data + * available, so we cannot do this unless we are guaranteed not to reach that. + */ + more = *data_remaining > 0; + return more; + invalid: /* Invalid frame. */ err = WSAECONNABORTED; /* Maps to UV_ECONNABORTED. 
*/ @@ -2059,12 +2116,20 @@ static DWORD uv__pipe_read_ipc(uv_loop_t* loop, uv_pipe_t* handle) { void uv__process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle, uv_req_t* req) { + DWORD err; + DWORD more; + DWORD bytes_requested; assert(handle->type == UV_NAMED_PIPE); handle->flags &= ~(UV_HANDLE_READ_PENDING | UV_HANDLE_CANCELLATION_PENDING); DECREASE_PENDING_REQ_COUNT(handle); eof_timer_stop(handle); + if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) { + UnregisterWait(handle->read_req.wait_handle); + handle->read_req.wait_handle = INVALID_HANDLE_VALUE; + } + /* At this point, we're done with bookkeeping. If the user has stopped * reading the pipe in the meantime, there is nothing left to do, since there * is no callback that we can call. */ @@ -2073,7 +2138,7 @@ void uv__process_pipe_read_req(uv_loop_t* loop, if (!REQ_SUCCESS(req)) { /* An error occurred doing the zero-read. */ - DWORD err = GET_REQ_ERROR(req); + err = GET_REQ_ERROR(req); /* If the read was cancelled by uv__pipe_interrupt_read(), the request may * indicate an ERROR_OPERATION_ABORTED error. This error isn't relevant to @@ -2084,34 +2149,18 @@ void uv__process_pipe_read_req(uv_loop_t* loop, } else { /* The zero-read completed without error, indicating there is data * available in the kernel buffer. */ - DWORD avail; - - /* Get the number of bytes available. */ - avail = 0; - if (!PeekNamedPipe(handle->handle, NULL, 0, NULL, &avail, NULL)) - uv__pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_); - - /* Read until we've either read all the bytes available, or the 'reading' - * flag is cleared. */ - while (avail > 0 && handle->flags & UV_HANDLE_READING) { + while (handle->flags & UV_HANDLE_READING) { + bytes_requested = 65536; /* Depending on the type of pipe, read either IPC frames or raw data. */ - DWORD bytes_read = - handle->ipc ? uv__pipe_read_ipc(loop, handle) - : uv__pipe_read_data(loop, handle, avail, (DWORD) -1); + if (handle->ipc) + more = uv__pipe_read_ipc(loop, handle); + else + more = uv__pipe_read_data(loop, handle, &bytes_requested, INT32_MAX); /* If no bytes were read, treat this as an indication that an error * occurred, and break out of the read loop. */ - if (bytes_read == 0) - break; - - /* It is possible that more bytes were read than we thought were - * available. To prevent `avail` from underflowing, break out of the loop - * if this is the case. */ - if (bytes_read > avail) + if (more == 0) break; - - /* Recompute the number of bytes available. 
*/ - avail -= bytes_read; } } @@ -2132,17 +2181,15 @@ void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle, assert(handle->write_queue_size >= req->u.io.queued_bytes); handle->write_queue_size -= req->u.io.queued_bytes; - UNREGISTER_HANDLE_REQ(loop, handle, req); + UNREGISTER_HANDLE_REQ(loop, handle); - if (handle->flags & UV_HANDLE_EMULATE_IOCP) { - if (req->wait_handle != INVALID_HANDLE_VALUE) { - UnregisterWait(req->wait_handle); - req->wait_handle = INVALID_HANDLE_VALUE; - } - if (req->event_handle) { - CloseHandle(req->event_handle); - req->event_handle = NULL; - } + if (req->wait_handle != INVALID_HANDLE_VALUE) { + UnregisterWait(req->wait_handle); + req->wait_handle = INVALID_HANDLE_VALUE; + } + if (req->event_handle) { + CloseHandle(req->event_handle); + req->event_handle = NULL; } err = GET_REQ_ERROR(req); @@ -2219,7 +2266,7 @@ void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle, assert(handle->type == UV_NAMED_PIPE); - UNREGISTER_HANDLE_REQ(loop, handle, req); + UNREGISTER_HANDLE_REQ(loop, handle); err = 0; if (REQ_SUCCESS(req)) { @@ -2251,7 +2298,7 @@ void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle, /* Clear the shutdown_req field so we don't go here again. */ handle->stream.conn.shutdown_req = NULL; - UNREGISTER_HANDLE_REQ(loop, handle, req); + UNREGISTER_HANDLE_REQ(loop, handle); if (handle->flags & UV_HANDLE_CLOSING) { /* Already closing. Cancel the shutdown. */ diff --git a/deps/uv/src/win/process-stdio.c b/deps/uv/src/win/process-stdio.c index 0db35723731505..181db92ea30d45 100644 --- a/deps/uv/src/win/process-stdio.c +++ b/deps/uv/src/win/process-stdio.c @@ -46,12 +46,12 @@ #define CHILD_STDIO_CRT_FLAGS(buffer, fd) \ *((unsigned char*) (buffer) + sizeof(int) + fd) -#define CHILD_STDIO_HANDLE(buffer, fd) \ - *((HANDLE*) ((unsigned char*) (buffer) + \ - sizeof(int) + \ - sizeof(unsigned char) * \ - CHILD_STDIO_COUNT((buffer)) + \ - sizeof(HANDLE) * (fd))) +#define CHILD_STDIO_HANDLE(buffer, fd) \ + ((void*) ((unsigned char*) (buffer) + \ + sizeof(int) + \ + sizeof(unsigned char) * \ + CHILD_STDIO_COUNT((buffer)) + \ + sizeof(HANDLE) * (fd))) /* CRT file descriptor mode flags */ @@ -194,7 +194,7 @@ int uv__stdio_create(uv_loop_t* loop, CHILD_STDIO_COUNT(buffer) = count; for (i = 0; i < count; i++) { CHILD_STDIO_CRT_FLAGS(buffer, i) = 0; - CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE; + memset(CHILD_STDIO_HANDLE(buffer, i), 0xFF, sizeof(HANDLE)); } for (i = 0; i < count; i++) { @@ -215,14 +215,15 @@ int uv__stdio_create(uv_loop_t* loop, * handles in the stdio buffer are initialized with. * INVALID_HANDLE_VALUE, which should be okay. */ if (i <= 2) { + HANDLE nul; DWORD access = (i == 0) ? FILE_GENERIC_READ : FILE_GENERIC_WRITE | FILE_READ_ATTRIBUTES; - err = uv__create_nul_handle(&CHILD_STDIO_HANDLE(buffer, i), - access); + err = uv__create_nul_handle(&nul, access); if (err) goto error; + memcpy(CHILD_STDIO_HANDLE(buffer, i), &nul, sizeof(HANDLE)); CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV; } break; @@ -247,7 +248,7 @@ int uv__stdio_create(uv_loop_t* loop, if (err) goto error; - CHILD_STDIO_HANDLE(buffer, i) = child_pipe; + memcpy(CHILD_STDIO_HANDLE(buffer, i), &child_pipe, sizeof(HANDLE)); CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE; break; } @@ -263,7 +264,7 @@ int uv__stdio_create(uv_loop_t* loop, * error. 
*/ if (fdopt.data.fd <= 2 && err == ERROR_INVALID_HANDLE) { CHILD_STDIO_CRT_FLAGS(buffer, i) = 0; - CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE; + memset(CHILD_STDIO_HANDLE(buffer, i), 0xFF, sizeof(HANDLE)); break; } goto error; @@ -298,7 +299,7 @@ int uv__stdio_create(uv_loop_t* loop, return -1; } - CHILD_STDIO_HANDLE(buffer, i) = child_handle; + memcpy(CHILD_STDIO_HANDLE(buffer, i), &child_handle, sizeof(HANDLE)); break; } @@ -334,7 +335,7 @@ int uv__stdio_create(uv_loop_t* loop, if (err) goto error; - CHILD_STDIO_HANDLE(buffer, i) = child_handle; + memcpy(CHILD_STDIO_HANDLE(buffer, i), &child_handle, sizeof(HANDLE)); CHILD_STDIO_CRT_FLAGS(buffer, i) = crt_flags; break; } @@ -359,7 +360,7 @@ void uv__stdio_destroy(BYTE* buffer) { count = CHILD_STDIO_COUNT(buffer); for (i = 0; i < count; i++) { - HANDLE handle = CHILD_STDIO_HANDLE(buffer, i); + HANDLE handle = uv__stdio_handle(buffer, i); if (handle != INVALID_HANDLE_VALUE) { CloseHandle(handle); } @@ -374,7 +375,7 @@ void uv__stdio_noinherit(BYTE* buffer) { count = CHILD_STDIO_COUNT(buffer); for (i = 0; i < count; i++) { - HANDLE handle = CHILD_STDIO_HANDLE(buffer, i); + HANDLE handle = uv__stdio_handle(buffer, i); if (handle != INVALID_HANDLE_VALUE) { SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0); } @@ -412,5 +413,7 @@ WORD uv__stdio_size(BYTE* buffer) { HANDLE uv__stdio_handle(BYTE* buffer, int fd) { - return CHILD_STDIO_HANDLE(buffer, fd); + HANDLE handle; + memcpy(&handle, CHILD_STDIO_HANDLE(buffer, fd), sizeof(HANDLE)); + return handle; } diff --git a/deps/uv/src/win/process.c b/deps/uv/src/win/process.c index 4e94dee90e13ee..9d48ddc6f84d6f 100644 --- a/deps/uv/src/win/process.c +++ b/deps/uv/src/win/process.c @@ -26,7 +26,6 @@ #include #include #include -#include /* _alloca */ #include "uv.h" #include "internal.h" @@ -598,11 +597,9 @@ int make_program_args(char** args, int verbatim_arguments, WCHAR** dst_ptr) { } -int env_strncmp(const wchar_t* a, int na, const wchar_t* b) { +static int env_strncmp(const wchar_t* a, int na, const wchar_t* b) { wchar_t* a_eq; wchar_t* b_eq; - wchar_t* A; - wchar_t* B; int nb; int r; @@ -617,27 +614,8 @@ int env_strncmp(const wchar_t* a, int na, const wchar_t* b) { assert(b_eq); nb = b_eq - b; - A = _alloca((na+1) * sizeof(wchar_t)); - B = _alloca((nb+1) * sizeof(wchar_t)); - - r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, a, na, A, na); - assert(r==na); - A[na] = L'\0'; - r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, b, nb, B, nb); - assert(r==nb); - B[nb] = L'\0'; - - for (;;) { - wchar_t AA = *A++; - wchar_t BB = *B++; - if (AA < BB) { - return -1; - } else if (AA > BB) { - return 1; - } else if (!AA && !BB) { - return 0; - } - } + r = CompareStringOrdinal(a, na, b, nb, /*case insensitive*/TRUE); + return r - CSTR_EQUAL; } @@ -676,6 +654,7 @@ int make_program_env(char* env_block[], WCHAR** dst_ptr) { WCHAR* dst_copy; WCHAR** ptr_copy; WCHAR** env_copy; + char* p; size_t required_vars_value_len[ARRAY_SIZE(required_vars)]; /* first pass: determine size in UTF-16 */ @@ -691,11 +670,13 @@ int make_program_env(char* env_block[], WCHAR** dst_ptr) { } /* second pass: copy to UTF-16 environment block */ - dst_copy = uv__malloc(env_len * sizeof(WCHAR)); - if (dst_copy == NULL && env_len > 0) { + len = env_block_count * sizeof(WCHAR*); + p = uv__malloc(len + env_len * sizeof(WCHAR)); + if (p == NULL) { return UV_ENOMEM; } - env_copy = _alloca(env_block_count * sizeof(WCHAR*)); + env_copy = (void*) &p[0]; + dst_copy = (void*) &p[len]; ptr = dst_copy; ptr_copy = env_copy; @@ -745,7 
+726,7 @@ int make_program_env(char* env_block[], WCHAR** dst_ptr) { /* final pass: copy, in sort order, and inserting required variables */ dst = uv__malloc((1+env_len) * sizeof(WCHAR)); if (!dst) { - uv__free(dst_copy); + uv__free(p); return UV_ENOMEM; } @@ -790,7 +771,7 @@ int make_program_env(char* env_block[], WCHAR** dst_ptr) { assert(env_len == (size_t) (ptr - dst)); *ptr = L'\0'; - uv__free(dst_copy); + uv__free(p); *dst_ptr = dst; return 0; } @@ -1308,16 +1289,34 @@ static int uv__kill(HANDLE process_handle, int signum) { /* Unconditionally terminate the process. On Windows, killed processes * normally return 1. */ int err; + DWORD status; if (TerminateProcess(process_handle, 1)) return 0; - /* If the process already exited before TerminateProcess was called,. + /* If the process already exited before TerminateProcess was called, * TerminateProcess will fail with ERROR_ACCESS_DENIED. */ err = GetLastError(); - if (err == ERROR_ACCESS_DENIED && - WaitForSingleObject(process_handle, 0) == WAIT_OBJECT_0) { - return UV_ESRCH; + if (err == ERROR_ACCESS_DENIED) { + /* First check using GetExitCodeProcess() with status different from + * STILL_ACTIVE (259). This check can be set incorrectly by the process, + * though that is uncommon. */ + if (GetExitCodeProcess(process_handle, &status) && + status != STILL_ACTIVE) { + return UV_ESRCH; + } + + /* But the process could have exited with code == STILL_ACTIVE, use then + * WaitForSingleObject with timeout zero. This is prone to a race + * condition as it could return WAIT_TIMEOUT because the handle might + * not have been signaled yet.That would result in returning the wrong + * error code here (UV_EACCES instead of UV_ESRCH), but we cannot fix + * the kernel synchronization issue that TerminateProcess is + * inconsistent with WaitForSingleObject with just the APIs available to + * us in user space. */ + if (WaitForSingleObject(process_handle, 0) == WAIT_OBJECT_0) { + return UV_ESRCH; + } } return uv_translate_sys_error(err); @@ -1325,6 +1324,14 @@ static int uv__kill(HANDLE process_handle, int signum) { case 0: { /* Health check: is the process still alive? 
*/ + DWORD status; + + if (!GetExitCodeProcess(process_handle, &status)) + return uv_translate_sys_error(GetLastError()); + + if (status != STILL_ACTIVE) + return UV_ESRCH; + switch (WaitForSingleObject(process_handle, 0)) { case WAIT_OBJECT_0: return UV_ESRCH; diff --git a/deps/uv/src/win/req-inl.h b/deps/uv/src/win/req-inl.h index 9e2075906f53ef..cf16e8ba41fa73 100644 --- a/deps/uv/src/win/req-inl.h +++ b/deps/uv/src/win/req-inl.h @@ -53,16 +53,16 @@ (uv__ntstatus_to_winsock_error(GET_REQ_STATUS((req)))) -#define REGISTER_HANDLE_REQ(loop, handle, req) \ +#define REGISTER_HANDLE_REQ(loop, handle) \ do { \ INCREASE_ACTIVE_COUNT((loop), (handle)); \ - uv__req_register((loop), (req)); \ + uv__req_register((loop)); \ } while (0) -#define UNREGISTER_HANDLE_REQ(loop, handle, req) \ +#define UNREGISTER_HANDLE_REQ(loop, handle) \ do { \ DECREASE_ACTIVE_COUNT((loop), (handle)); \ - uv__req_unregister((loop), (req)); \ + uv__req_unregister((loop)); \ } while (0) @@ -83,7 +83,7 @@ INLINE static uv_req_t* uv__overlapped_to_req(OVERLAPPED* overlapped) { - return CONTAINING_RECORD(overlapped, uv_req_t, u.io.overlapped); + return container_of(overlapped, uv_req_t, u.io.overlapped); } diff --git a/deps/uv/src/win/signal.c b/deps/uv/src/win/signal.c index 8c79871b9bb07f..85730b27b2b212 100644 --- a/deps/uv/src/win/signal.c +++ b/deps/uv/src/win/signal.c @@ -91,7 +91,7 @@ int uv__signal_dispatch(int signum) { for (handle = RB_NFIND(uv_signal_tree_s, &uv__signal_tree, &lookup); handle != NULL && handle->signum == signum; - handle = RB_NEXT(uv_signal_tree_s, &uv__signal_tree, handle)) { + handle = RB_NEXT(uv_signal_tree_s, handle)) { unsigned long previous = InterlockedExchange( (volatile LONG*) &handle->pending_signum, signum); diff --git a/deps/uv/src/win/stream.c b/deps/uv/src/win/stream.c index 7bf9ca388cb0f0..a53a10b03823e1 100644 --- a/deps/uv/src/win/stream.c +++ b/deps/uv/src/win/stream.c @@ -131,7 +131,7 @@ int uv_write(uv_write_t* req, case UV_NAMED_PIPE: err = uv__pipe_write( loop, req, (uv_pipe_t*) handle, bufs, nbufs, NULL, cb); - break; + return uv_translate_write_sys_error(err); case UV_TTY: err = uv__tty_write(loop, req, (uv_tty_t*) handle, bufs, nbufs, cb); break; @@ -164,7 +164,7 @@ int uv_write2(uv_write_t* req, err = uv__pipe_write( loop, req, (uv_pipe_t*) handle, bufs, nbufs, send_handle, cb); - return uv_translate_sys_error(err); + return uv_translate_write_sys_error(err); } @@ -216,7 +216,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) { handle->flags &= ~UV_HANDLE_WRITABLE; handle->stream.conn.shutdown_req = req; handle->reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); if (handle->stream.conn.write_reqs_pending == 0) { if (handle->type == UV_NAMED_PIPE) diff --git a/deps/uv/src/win/tcp.c b/deps/uv/src/win/tcp.c index 187f36e2a61c87..c452c12e8f06f1 100644 --- a/deps/uv/src/win/tcp.c +++ b/deps/uv/src/win/tcp.c @@ -58,11 +58,17 @@ static int uv__tcp_keepalive(uv_tcp_t* handle, SOCKET socket, int enable, unsign return WSAGetLastError(); } - if (enable && setsockopt(socket, - IPPROTO_TCP, - TCP_KEEPALIVE, - (const char*)&delay, - sizeof delay) == -1) { + if (!enable) + return 0; + + if (delay < 1) + return UV_EINVAL; + + if (setsockopt(socket, + IPPROTO_TCP, + TCP_KEEPALIVE, + (const char*)&delay, + sizeof delay) == -1) { return WSAGetLastError(); } @@ -206,7 +212,7 @@ void uv__process_tcp_shutdown_req(uv_loop_t* loop, uv_tcp_t* stream, uv_shutdown assert(stream->flags & UV_HANDLE_CONNECTION); 
stream->stream.conn.shutdown_req = NULL; - UNREGISTER_HANDLE_REQ(loop, stream, req); + UNREGISTER_HANDLE_REQ(loop, stream); err = 0; if (stream->flags & UV_HANDLE_CLOSING) @@ -286,6 +292,12 @@ static int uv__tcp_try_bind(uv_tcp_t* handle, DWORD err; int r; + /* There is no SO_REUSEPORT on Windows, Windows only knows SO_REUSEADDR. + * so we just return an error directly when UV_TCP_REUSEPORT is requested + * for binding the socket. */ + if (flags & UV_TCP_REUSEPORT) + return ERROR_NOT_SUPPORTED; + if (handle->socket == INVALID_SOCKET) { SOCKET sock; @@ -822,7 +834,7 @@ static int uv__tcp_try_connect(uv_connect_t* req, if (handle->delayed_error != 0) { /* Process the req without IOCP. */ handle->reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); uv__insert_pending_req(loop, (uv_req_t*)req); return 0; } @@ -838,12 +850,12 @@ static int uv__tcp_try_connect(uv_connect_t* req, if (UV_SUCCEEDED_WITHOUT_IOCP(success)) { /* Process the req without IOCP. */ handle->reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); uv__insert_pending_req(loop, (uv_req_t*)req); } else if (UV_SUCCEEDED_WITH_IOCP(success)) { /* The req will be processed with IOCP. */ handle->reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); } else { return WSAGetLastError(); } @@ -913,14 +925,14 @@ int uv__tcp_write(uv_loop_t* loop, req->u.io.queued_bytes = 0; handle->reqs_pending++; handle->stream.conn.write_reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); uv__insert_pending_req(loop, (uv_req_t*) req); } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { /* Request queued by the kernel. */ req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs); handle->reqs_pending++; handle->stream.conn.write_reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); handle->write_queue_size += req->u.io.queued_bytes; if (handle->flags & UV_HANDLE_EMULATE_IOCP && !RegisterWaitForSingleObject(&req->wait_handle, @@ -934,7 +946,7 @@ int uv__tcp_write(uv_loop_t* loop, req->u.io.queued_bytes = 0; handle->reqs_pending++; handle->stream.conn.write_reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); SET_REQ_ERROR(req, WSAGetLastError()); uv__insert_pending_req(loop, (uv_req_t*) req); } @@ -1105,7 +1117,7 @@ void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle, assert(handle->write_queue_size >= req->u.io.queued_bytes); handle->write_queue_size -= req->u.io.queued_bytes; - UNREGISTER_HANDLE_REQ(loop, handle, req); + UNREGISTER_HANDLE_REQ(loop, handle); if (handle->flags & UV_HANDLE_EMULATE_IOCP) { if (req->wait_handle != INVALID_HANDLE_VALUE) { @@ -1197,7 +1209,7 @@ void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle, assert(handle->type == UV_TCP); - UNREGISTER_HANDLE_REQ(loop, handle, req); + UNREGISTER_HANDLE_REQ(loop, handle); err = 0; if (handle->delayed_error) { @@ -1551,11 +1563,6 @@ int uv__tcp_connect(uv_connect_t* req, return 0; } -#ifndef WSA_FLAG_NO_HANDLE_INHERIT -/* Added in Windows 7 SP1. Specify this to avoid race conditions, */ -/* but also manually clear the inherit flag in case this failed. 
*/ -#define WSA_FLAG_NO_HANDLE_INHERIT 0x80 -#endif int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) { SOCKET server = INVALID_SOCKET; diff --git a/deps/uv/src/win/thread.c b/deps/uv/src/win/thread.c index 57c25e8f5a861c..bf39b88633b0d8 100644 --- a/deps/uv/src/win/thread.c +++ b/deps/uv/src/win/thread.c @@ -32,45 +32,23 @@ #include "uv.h" #include "internal.h" -static void uv__once_inner(uv_once_t* guard, void (*callback)(void)) { - DWORD result; - HANDLE existing_event, created_event; - - created_event = CreateEvent(NULL, 1, 0, NULL); - if (created_event == 0) { - /* Could fail in a low-memory situation? */ - uv_fatal_error(GetLastError(), "CreateEvent"); - } +typedef void (*uv__once_cb)(void); - existing_event = InterlockedCompareExchangePointer(&guard->event, - created_event, - NULL); +typedef struct { + uv__once_cb callback; +} uv__once_data_t; - if (existing_event == NULL) { - /* We won the race */ - callback(); +static BOOL WINAPI uv__once_inner(INIT_ONCE *once, void* param, void** context) { + uv__once_data_t* data = param; - result = SetEvent(created_event); - assert(result); - guard->ran = 1; + data->callback(); - } else { - /* We lost the race. Destroy the event we created and wait for the existing - * one to become signaled. */ - CloseHandle(created_event); - result = WaitForSingleObject(existing_event, INFINITE); - assert(result == WAIT_OBJECT_0); - } + return TRUE; } - -void uv_once(uv_once_t* guard, void (*callback)(void)) { - /* Fast case - avoid WaitForSingleObject. */ - if (guard->ran) { - return; - } - - uv__once_inner(guard, callback); +void uv_once(uv_once_t* guard, uv__once_cb callback) { + uv__once_data_t data = { .callback = callback }; + InitOnceExecuteOnce(&guard->init_once, uv__once_inner, (void*) &data, NULL); } diff --git a/deps/uv/src/win/tty.c b/deps/uv/src/win/tty.c index 9f8dd698d76124..c0339ded2e4b76 100644 --- a/deps/uv/src/win/tty.c +++ b/deps/uv/src/win/tty.c @@ -2183,7 +2183,7 @@ int uv__tty_write(uv_loop_t* loop, handle->reqs_pending++; handle->stream.conn.write_reqs_pending++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); req->u.io.queued_bytes = 0; @@ -2219,7 +2219,7 @@ void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle, int err; handle->write_queue_size -= req->u.io.queued_bytes; - UNREGISTER_HANDLE_REQ(loop, handle, req); + UNREGISTER_HANDLE_REQ(loop, handle); if (req->cb) { err = GET_REQ_ERROR(req); @@ -2263,7 +2263,7 @@ void uv__process_tty_shutdown_req(uv_loop_t* loop, uv_tty_t* stream, uv_shutdown assert(req); stream->stream.conn.shutdown_req = NULL; - UNREGISTER_HANDLE_REQ(loop, stream, req); + UNREGISTER_HANDLE_REQ(loop, stream); /* TTY shutdown is really just a no-op */ if (req->cb) { @@ -2380,8 +2380,8 @@ static DWORD WINAPI uv__tty_console_resize_watcher_thread(void* param) { /* Make sure to not overwhelm the system with resize events */ Sleep(33); WaitForSingleObject(uv__tty_console_resized, INFINITE); - uv__tty_console_signal_resize(); ResetEvent(uv__tty_console_resized); + uv__tty_console_signal_resize(); } return 0; } diff --git a/deps/uv/src/win/udp.c b/deps/uv/src/win/udp.c index eab53842d4fe37..5c8f6e1dd0b449 100644 --- a/deps/uv/src/win/udp.c +++ b/deps/uv/src/win/udp.c @@ -200,6 +200,12 @@ static int uv__udp_maybe_bind(uv_udp_t* handle, if (handle->flags & UV_HANDLE_BOUND) return 0; + /* There is no SO_REUSEPORT on Windows, Windows only knows SO_REUSEADDR. 
+ * so we just return an error directly when UV_UDP_REUSEPORT is requested + * for binding the socket. */ + if (flags & UV_UDP_REUSEPORT) + return ERROR_NOT_SUPPORTED; + if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6) { /* UV_UDP_IPV6ONLY is supported only for IPV6 sockets */ return ERROR_INVALID_PARAMETER; @@ -376,7 +382,7 @@ static int uv__send(uv_udp_send_t* req, handle->reqs_pending++; handle->send_queue_size += req->u.io.queued_bytes; handle->send_queue_count++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); uv__insert_pending_req(loop, (uv_req_t*)req); } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { /* Request queued by the kernel. */ @@ -384,7 +390,7 @@ static int uv__send(uv_udp_send_t* req, handle->reqs_pending++; handle->send_queue_size += req->u.io.queued_bytes; handle->send_queue_count++; - REGISTER_HANDLE_REQ(loop, handle, req); + REGISTER_HANDLE_REQ(loop, handle); } else { /* Send failed due to an error. */ return WSAGetLastError(); @@ -527,7 +533,7 @@ void uv__process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle, handle->send_queue_size -= req->u.io.queued_bytes; handle->send_queue_count--; - UNREGISTER_HANDLE_REQ(loop, handle, req); + UNREGISTER_HANDLE_REQ(loop, handle); if (req->cb) { err = 0; diff --git a/deps/uv/src/win/util.c b/deps/uv/src/win/util.c index a96cb915930a30..e0dba1aaa94e28 100644 --- a/deps/uv/src/win/util.c +++ b/deps/uv/src/win/util.c @@ -316,25 +316,19 @@ uv_pid_t uv_os_getpid(void) { uv_pid_t uv_os_getppid(void) { - int parent_pid = -1; - HANDLE handle; - PROCESSENTRY32 pe; - DWORD current_pid = GetCurrentProcessId(); - - pe.dwSize = sizeof(PROCESSENTRY32); - handle = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0); - - if (Process32First(handle, &pe)) { - do { - if (pe.th32ProcessID == current_pid) { - parent_pid = pe.th32ParentProcessID; - break; - } - } while( Process32Next(handle, &pe)); + NTSTATUS nt_status; + PROCESS_BASIC_INFORMATION basic_info; + + nt_status = pNtQueryInformationProcess(GetCurrentProcess(), + ProcessBasicInformation, + &basic_info, + sizeof(basic_info), + NULL); + if (NT_SUCCESS(nt_status)) { + return basic_info.InheritedFromUniqueProcessId; + } else { + return -1; } - - CloseHandle(handle); - return parent_pid; } @@ -512,19 +506,23 @@ int uv_uptime(double* uptime) { unsigned int uv_available_parallelism(void) { - SYSTEM_INFO info; - unsigned rc; + DWORD_PTR procmask; + DWORD_PTR sysmask; + int count; + int i; /* TODO(bnoordhuis) Use GetLogicalProcessorInformationEx() to support systems * with > 64 CPUs? See https://github.com/libuv/libuv/pull/3458 */ - GetSystemInfo(&info); + count = 0; + if (GetProcessAffinityMask(GetCurrentProcess(), &procmask, &sysmask)) + for (i = 0; i < 8 * sizeof(procmask); i++) + count += 1 & (procmask >> i); - rc = info.dwNumberOfProcessors; - if (rc < 1) - rc = 1; + if (count > 0) + return count; - return rc; + return 1; } @@ -942,8 +940,13 @@ int uv_os_homedir(char* buffer, size_t* size) { r = uv_os_getenv("USERPROFILE", buffer, size); /* Don't return an error if USERPROFILE was not found. 
*/ - if (r != UV_ENOENT) + if (r != UV_ENOENT) { + /* USERPROFILE is empty or invalid */ + if (r == 0 && *size < 3) { + return UV_ENOENT; + } return r; + } /* USERPROFILE is not set, so call uv_os_get_passwd() */ r = uv_os_get_passwd(&pwd); @@ -980,6 +983,12 @@ int uv_os_tmpdir(char* buffer, size_t* size) { if (len == 0) { return uv_translate_sys_error(GetLastError()); } + + /* tmp path is empty or invalid */ + if (len < 3) { + return UV_ENOENT; + } + /* Include space for terminating null char. */ len += 1; path = uv__malloc(len * sizeof(wchar_t)); @@ -1259,6 +1268,9 @@ int uv_os_getenv(const char* name, char* buffer, size_t* size) { SetLastError(ERROR_SUCCESS); len = GetEnvironmentVariableW(name_w, var, varlen); + if (len == 0) + r = uv_translate_sys_error(GetLastError()); + if (len < varlen) break; @@ -1280,15 +1292,8 @@ int uv_os_getenv(const char* name, char* buffer, size_t* size) { uv__free(name_w); name_w = NULL; - if (len == 0) { - r = GetLastError(); - if (r != ERROR_SUCCESS) { - r = uv_translate_sys_error(r); - goto fail; - } - } - - r = uv__copy_utf16_to_utf8(var, len, buffer, size); + if (r == 0) + r = uv__copy_utf16_to_utf8(var, len, buffer, size); fail: @@ -1528,20 +1533,7 @@ int uv_os_uname(uv_utsname_t* buffer) { os_info.dwOSVersionInfoSize = sizeof(os_info); os_info.szCSDVersion[0] = L'\0'; - /* Try calling RtlGetVersion(), and fall back to the deprecated GetVersionEx() - if RtlGetVersion() is not available. */ - if (pRtlGetVersion) { - pRtlGetVersion(&os_info); - } else { - /* Silence GetVersionEx() deprecation warning. */ - #ifdef _MSC_VER - #pragma warning(suppress : 4996) - #endif - if (GetVersionExW(&os_info) == 0) { - r = uv_translate_sys_error(GetLastError()); - goto error; - } - } + pRtlGetVersion(&os_info); /* Populate the version field. 
*/ version_size = 0; diff --git a/deps/uv/src/win/winapi.c b/deps/uv/src/win/winapi.c index 53147b8262e284..a74108db03e701 100644 --- a/deps/uv/src/win/winapi.c +++ b/deps/uv/src/win/winapi.c @@ -48,12 +48,16 @@ sSetWinEventHook pSetWinEventHook; /* ws2_32.dll function pointer */ uv_sGetHostNameW pGetHostNameW; +/* api-ms-win-core-file-l2-1-4.dll function pointer */ +sGetFileInformationByName pGetFileInformationByName; + void uv__winapi_init(void) { HMODULE ntdll_module; HMODULE powrprof_module; HMODULE user32_module; HMODULE kernel32_module; HMODULE ws2_32_module; + HMODULE api_win_core_file_module; ntdll_module = GetModuleHandleA("ntdll.dll"); if (ntdll_module == NULL) { @@ -99,7 +103,7 @@ void uv__winapi_init(void) { pNtQueryDirectoryFile = (sNtQueryDirectoryFile) GetProcAddress(ntdll_module, "NtQueryDirectoryFile"); - if (pNtQueryVolumeInformationFile == NULL) { + if (pNtQueryDirectoryFile == NULL) { uv_fatal_error(GetLastError(), "GetProcAddress"); } @@ -144,4 +148,10 @@ void uv__winapi_init(void) { ws2_32_module, "GetHostNameW"); } + + api_win_core_file_module = GetModuleHandleA("api-ms-win-core-file-l2-1-4.dll"); + if (api_win_core_file_module != NULL) { + pGetFileInformationByName = (sGetFileInformationByName)GetProcAddress( + api_win_core_file_module, "GetFileInformationByName"); + } } diff --git a/deps/uv/src/win/winapi.h b/deps/uv/src/win/winapi.h index d380bda42a3105..548081f23a9276 100644 --- a/deps/uv/src/win/winapi.h +++ b/deps/uv/src/win/winapi.h @@ -4125,6 +4125,24 @@ typedef const UNICODE_STRING *PCUNICODE_STRING; # define DEVICE_TYPE DWORD #endif +typedef struct _FILE_STAT_BASIC_INFORMATION { + LARGE_INTEGER FileId; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER AllocationSize; + LARGE_INTEGER EndOfFile; + ULONG FileAttributes; + ULONG ReparseTag; + ULONG NumberOfLinks; + ULONG DeviceType; + ULONG DeviceCharacteristics; + ULONG Reserved; + FILE_ID_128 FileId128; + LARGE_INTEGER VolumeSerialNumber; +} FILE_STAT_BASIC_INFORMATION; + /* MinGW already has a definition for REPARSE_DATA_BUFFER, but mingw-w64 does * not. 
*/ @@ -4224,6 +4242,15 @@ typedef enum _FILE_INFORMATION_CLASS { FileNumaNodeInformation, FileStandardLinkInformation, FileRemoteProtocolInformation, + FileRenameInformationBypassAccessCheck, + FileLinkInformationBypassAccessCheck, + FileVolumeNameInformation, + FileIdInformation, + FileIdExtdDirectoryInformation, + FileReplaceCompletionInformation, + FileHardLinkFullIdInformation, + FileIdExtdBothDirectoryInformation, + FileDispositionInformationEx, /* based on https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ne-wdm-_file_information_class */ FileMaximumInformation } FILE_INFORMATION_CLASS, *PFILE_INFORMATION_CLASS; @@ -4323,6 +4350,10 @@ typedef struct _FILE_DISPOSITION_INFORMATION { BOOLEAN DeleteFile; } FILE_DISPOSITION_INFORMATION, *PFILE_DISPOSITION_INFORMATION; +typedef struct _FILE_DISPOSITION_INFORMATION_EX { + DWORD Flags; +} FILE_DISPOSITION_INFORMATION_EX, *PFILE_DISPOSITION_INFORMATION_EX; + typedef struct _FILE_PIPE_LOCAL_INFORMATION { ULONG NamedPipeType; ULONG NamedPipeConfiguration; @@ -4427,6 +4458,14 @@ typedef struct _FILE_FS_SECTOR_SIZE_INFORMATION { ULONG ByteOffsetForPartitionAlignment; } FILE_FS_SECTOR_SIZE_INFORMATION, *PFILE_FS_SECTOR_SIZE_INFORMATION; +typedef struct _PROCESS_BASIC_INFORMATION { + PVOID Reserved1; + PVOID PebBaseAddress; + PVOID Reserved2[2]; + ULONG_PTR UniqueProcessId; + ULONG_PTR InheritedFromUniqueProcessId; +} PROCESS_BASIC_INFORMATION, *PPROCESS_BASIC_INFORMATION; + typedef struct _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION { LARGE_INTEGER IdleTime; LARGE_INTEGER KernelTime; @@ -4440,6 +4479,10 @@ typedef struct _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION { # define SystemProcessorPerformanceInformation 8 #endif +#ifndef ProcessBasicInformation +# define ProcessBasicInformation 0 +#endif + #ifndef ProcessConsoleHostProcess # define ProcessConsoleHostProcess 49 #endif @@ -4739,6 +4782,21 @@ typedef struct _TCP_INITIAL_RTO_PARAMETERS { # define SIO_TCP_INITIAL_RTO _WSAIOW(IOC_VENDOR,17) #endif +/* from winnt.h */ +typedef enum _FILE_INFO_BY_NAME_CLASS { + FileStatByNameInfo, + FileStatLxByNameInfo, + FileCaseSensitiveByNameInfo, + FileStatBasicByNameInfo, + MaximumFileInfoByNameClass +} FILE_INFO_BY_NAME_CLASS; + +typedef BOOL(WINAPI* sGetFileInformationByName)( + PCWSTR FileName, + FILE_INFO_BY_NAME_CLASS FileInformationClass, + PVOID FileInfoBuffer, + ULONG FileInfoBufferSize); + /* Ntdll function pointers */ extern sRtlGetVersion pRtlGetVersion; extern sRtlNtStatusToDosError pRtlNtStatusToDosError; @@ -4759,6 +4817,9 @@ extern sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotifi /* User32.dll function pointer */ extern sSetWinEventHook pSetWinEventHook; +/* api-ms-win-core-file-l2-1-4.dll function pointers */ +extern sGetFileInformationByName pGetFileInformationByName; + /* ws2_32.dll function pointer */ /* mingw doesn't have this definition, so let's declare it here locally */ typedef int (WINAPI *uv_sGetHostNameW) diff --git a/deps/uv/test/task.h b/deps/uv/test/task.h index 8b8353263da90f..e25a9c9a1386aa 100644 --- a/deps/uv/test/task.h +++ b/deps/uv/test/task.h @@ -53,6 +53,10 @@ # define TEST_PIPENAME "\\\\.\\pipe\\uv-test" # define TEST_PIPENAME_2 "\\\\.\\pipe\\uv-test2" # define TEST_PIPENAME_3 "\\\\.\\pipe\\uv-test3" +#elif __ANDROID__ +# define TEST_PIPENAME "/data/local/tmp/uv-test-sock" +# define TEST_PIPENAME_2 "/data/local/tmp/uv-test-sock2" +# define TEST_PIPENAME_3 "/data/local/tmp/uv-test-sock3" #else # define TEST_PIPENAME "/tmp/uv-test-sock" # define TEST_PIPENAME_2 "/tmp/uv-test-sock2" 
diff --git a/deps/uv/test/test-emfile.c b/deps/uv/test/test-emfile.c index ef2338cdfbb165..3ad8378ca1e405 100644 --- a/deps/uv/test/test-emfile.c +++ b/deps/uv/test/test-emfile.c @@ -75,6 +75,14 @@ TEST_IMPL(emfile) { ASSERT_EQ(errno, EMFILE); close(maxfd); +#if defined(__ANDROID__) + /* Android connect syscall requires an extra file descriptor + * + * It fails in uv__tcp_connect + * */ + close(maxfd - 1); +#endif + /* Now connect and use up the last available file descriptor. The EMFILE * handling logic in src/unix/stream.c should ensure that connect_cb() runs * whereas connection_cb() should *not* run. diff --git a/deps/uv/test/test-env-vars.c b/deps/uv/test/test-env-vars.c index 016f0733c6e424..fd25ea26e14f75 100644 --- a/deps/uv/test/test-env-vars.c +++ b/deps/uv/test/test-env-vars.c @@ -33,6 +33,11 @@ TEST_IMPL(env_vars) { int i, r, envcount, found, found_win_special; uv_env_item_t* envitems; +#if defined(_WIN32) && defined(__ASAN__) + /* See investigation in https://github.com/libuv/libuv/issues/4338 */ + RETURN_SKIP("Test does not currently work on Windows under ASAN"); +#endif + /* Reject invalid inputs when setting an environment variable */ r = uv_os_setenv(NULL, "foo"); ASSERT_EQ(r, UV_EINVAL); diff --git a/deps/uv/test/test-fs-copyfile.c b/deps/uv/test/test-fs-copyfile.c index 3aacf12596f558..f7a0c2363e8f1f 100644 --- a/deps/uv/test/test-fs-copyfile.c +++ b/deps/uv/test/test-fs-copyfile.c @@ -46,6 +46,8 @@ static void handle_result(uv_fs_t* req) { uv_fs_t stat_req; uint64_t size; uint64_t mode; + uint64_t uid; + uint64_t gid; int r; ASSERT_EQ(req->fs_type, UV_FS_COPYFILE); @@ -56,11 +58,15 @@ static void handle_result(uv_fs_t* req) { ASSERT_OK(r); size = stat_req.statbuf.st_size; mode = stat_req.statbuf.st_mode; + uid = stat_req.statbuf.st_uid; + gid = stat_req.statbuf.st_gid; uv_fs_req_cleanup(&stat_req); r = uv_fs_stat(NULL, &stat_req, dst, NULL); ASSERT_OK(r); ASSERT_EQ(stat_req.statbuf.st_size, size); ASSERT_EQ(stat_req.statbuf.st_mode, mode); + ASSERT_EQ(stat_req.statbuf.st_uid, uid); + ASSERT_EQ(stat_req.statbuf.st_gid, gid); uv_fs_req_cleanup(&stat_req); uv_fs_req_cleanup(req); result_check_count++; diff --git a/deps/uv/test/test-fs-event.c b/deps/uv/test/test-fs-event.c index 0ef51180dbd94d..bb223a5f654c03 100644 --- a/deps/uv/test/test-fs-event.c +++ b/deps/uv/test/test-fs-event.c @@ -29,16 +29,6 @@ # include #endif -#ifndef HAVE_KQUEUE -# if defined(__APPLE__) || \ - defined(__DragonFly__) || \ - defined(__FreeBSD__) || \ - defined(__OpenBSD__) || \ - defined(__NetBSD__) -# define HAVE_KQUEUE 1 -# endif -#endif - static uv_fs_event_t fs_event; static const char file_prefix[] = "fsevent-"; static const int fs_event_file_count = 16; @@ -91,6 +81,22 @@ static void create_file(const char* name) { uv_fs_req_cleanup(&req); } +static int delete_dir(const char* name) { + int r; + uv_fs_t req; + r = uv_fs_rmdir(NULL, &req, name, NULL); + uv_fs_req_cleanup(&req); + return r; +} + +static int delete_file(const char* name) { + int r; + uv_fs_t req; + r = uv_fs_unlink(NULL, &req, name, NULL); + uv_fs_req_cleanup(&req); + return r; +} + static void touch_file(const char* name) { int r; uv_file file; @@ -139,6 +145,19 @@ static void fs_event_cb_dir(uv_fs_event_t* handle, const char* filename, uv_close((uv_handle_t*)handle, close_cb); } +static void fs_event_cb_del_dir(uv_fs_event_t* handle, + const char* filename, + int events, + int status) { + ++fs_event_cb_called; + ASSERT_PTR_EQ(handle, &fs_event); + ASSERT_OK(status); + ASSERT(events == UV_CHANGE || events == UV_RENAME); + 
ASSERT_OK(strcmp(filename, "watch_del_dir")); + ASSERT_OK(uv_fs_event_stop(handle)); + uv_close((uv_handle_t*)handle, close_cb); +} + static const char* fs_event_get_filename(int i) { snprintf(fs_event_filename, sizeof(fs_event_filename), @@ -162,6 +181,15 @@ static void fs_event_create_files(uv_timer_t* handle) { } } +static void fs_event_del_dir(uv_timer_t* handle) { + int r; + + r = delete_dir("watch_del_dir"); + ASSERT_OK(r); + + uv_close((uv_handle_t*)handle, close_cb); +} + static void fs_event_unlink_files(uv_timer_t* handle) { int r; int i; @@ -170,7 +198,7 @@ static void fs_event_unlink_files(uv_timer_t* handle) { if (handle == NULL) { /* Unlink all files */ for (i = 0; i < 16; i++) { - r = remove(fs_event_get_filename(i)); + r = delete_file(fs_event_get_filename(i)); if (handle != NULL) ASSERT_OK(r); } @@ -179,7 +207,7 @@ static void fs_event_unlink_files(uv_timer_t* handle) { ASSERT_LT(fs_event_removed, fs_event_file_count); /* Remove the file */ - ASSERT_OK(remove(fs_event_get_filename(fs_event_removed))); + ASSERT_OK(delete_file(fs_event_get_filename(fs_event_removed))); if (++fs_event_removed < fs_event_file_count) { /* Remove another file on a different event loop tick. We do it this way @@ -197,12 +225,13 @@ static void fs_event_cb_dir_multi_file(uv_fs_event_t* handle, ASSERT_PTR_EQ(handle, &fs_event); ASSERT_OK(status); ASSERT(events == UV_CHANGE || events == UV_RENAME); - #if defined(__APPLE__) || defined(_WIN32) || defined(__linux__) - ASSERT_OK(strncmp(filename, file_prefix, sizeof(file_prefix) - 1)); - #else - ASSERT_NE(filename == NULL || - strncmp(filename, file_prefix, sizeof(file_prefix) - 1) == 0, 0); - #endif +#if defined(__APPLE__) || defined(_WIN32) || defined(__linux__) + ASSERT_NOT_NULL(filename); + ASSERT_MEM_EQ(filename, file_prefix, sizeof(file_prefix) - 1); +#else + if (filename != NULL) + ASSERT_MEM_EQ(filename, file_prefix, sizeof(file_prefix) - 1); +#endif if (fs_event_created + fs_event_removed == fs_event_file_count) { /* Once we've processed all create events, delete all files */ @@ -246,7 +275,7 @@ static void fs_event_unlink_files_in_subdir(uv_timer_t* handle) { if (handle == NULL) { /* Unlink all files */ for (i = 0; i < 16; i++) { - r = remove(fs_event_get_filename_in_subdir(i)); + r = delete_file(fs_event_get_filename_in_subdir(i)); if (handle != NULL) ASSERT_OK(r); } @@ -255,7 +284,7 @@ static void fs_event_unlink_files_in_subdir(uv_timer_t* handle) { ASSERT_LT(fs_event_removed, fs_event_file_count); /* Remove the file */ - ASSERT_OK(remove(fs_event_get_filename_in_subdir(fs_event_removed))); + ASSERT_OK(delete_file(fs_event_get_filename_in_subdir(fs_event_removed))); if (++fs_event_removed < fs_event_file_count) { /* Remove another file on a different event loop tick. 
We do it this way @@ -416,9 +445,9 @@ TEST_IMPL(fs_event_watch_dir) { /* Setup */ fs_event_unlink_files(NULL); - remove("watch_dir/file2"); - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file2"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); create_dir("watch_dir"); r = uv_fs_event_init(loop, &fs_event); @@ -437,9 +466,47 @@ TEST_IMPL(fs_event_watch_dir) { /* Cleanup */ fs_event_unlink_files(NULL); - remove("watch_dir/file2"); - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file2"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); + + MAKE_VALGRIND_HAPPY(loop); + return 0; +} + +TEST_IMPL(fs_event_watch_delete_dir) { +#if defined(NO_FS_EVENTS) + RETURN_SKIP(NO_FS_EVENTS); +#elif defined(__MVS__) + RETURN_SKIP("Directory watching not supported on this platform."); +#elif defined(__APPLE__) && defined(__TSAN__) + RETURN_SKIP("Times out under TSAN."); +#endif + + uv_loop_t* loop = uv_default_loop(); + int r; + + /* Setup */ + fs_event_unlink_files(NULL); + delete_dir("watch_del_dir/"); + create_dir("watch_del_dir"); + + r = uv_fs_event_init(loop, &fs_event); + ASSERT_OK(r); + r = uv_fs_event_start(&fs_event, fs_event_cb_del_dir, "watch_del_dir", 0); + ASSERT_OK(r); + r = uv_timer_init(loop, &timer); + ASSERT_OK(r); + r = uv_timer_start(&timer, fs_event_del_dir, 100, 0); + ASSERT_OK(r); + + uv_run(loop, UV_RUN_DEFAULT); + + ASSERT_EQ(1, fs_event_cb_called); + ASSERT_EQ(2, close_cb_called); + + /* Cleanup */ + fs_event_unlink_files(NULL); MAKE_VALGRIND_HAPPY(loop); return 0; @@ -457,10 +524,10 @@ TEST_IMPL(fs_event_watch_dir_recursive) { /* Setup */ loop = uv_default_loop(); fs_event_unlink_files(NULL); - remove("watch_dir/file2"); - remove("watch_dir/file1"); - remove("watch_dir/subdir"); - remove("watch_dir/"); + delete_file("watch_dir/file2"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/subdir"); + delete_dir("watch_dir/"); create_dir("watch_dir"); create_dir("watch_dir/subdir"); @@ -500,10 +567,10 @@ TEST_IMPL(fs_event_watch_dir_recursive) { /* Cleanup */ fs_event_unlink_files_in_subdir(NULL); - remove("watch_dir/file2"); - remove("watch_dir/file1"); - remove("watch_dir/subdir"); - remove("watch_dir/"); + delete_file("watch_dir/file2"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/subdir"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 0; @@ -521,8 +588,8 @@ TEST_IMPL(fs_event_watch_dir_short_path) { /* Setup */ loop = uv_default_loop(); - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); create_dir("watch_dir"); create_file("watch_dir/file1"); @@ -549,8 +616,8 @@ TEST_IMPL(fs_event_watch_dir_short_path) { } /* Cleanup */ - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); @@ -571,9 +638,9 @@ TEST_IMPL(fs_event_watch_file) { int r; /* Setup */ - remove("watch_dir/file2"); - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file2"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); create_dir("watch_dir"); create_file("watch_dir/file1"); create_file("watch_dir/file2"); @@ -594,9 +661,9 @@ TEST_IMPL(fs_event_watch_file) { ASSERT_EQ(2, close_cb_called); /* Cleanup */ - remove("watch_dir/file2"); - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file2"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 
0; @@ -618,9 +685,9 @@ TEST_IMPL(fs_event_watch_file_exact_path) { loop = uv_default_loop(); /* Setup */ - remove("watch_dir/file.js"); - remove("watch_dir/file.jsx"); - remove("watch_dir/"); + delete_file("watch_dir/file.js"); + delete_file("watch_dir/file.jsx"); + delete_dir("watch_dir/"); create_dir("watch_dir"); create_file("watch_dir/file.js"); create_file("watch_dir/file.jsx"); @@ -647,9 +714,9 @@ TEST_IMPL(fs_event_watch_file_exact_path) { ASSERT_EQ(2, timer_cb_exact_called); /* Cleanup */ - remove("watch_dir/file.js"); - remove("watch_dir/file.jsx"); - remove("watch_dir/"); + delete_file("watch_dir/file.js"); + delete_file("watch_dir/file.jsx"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 0; @@ -690,7 +757,7 @@ TEST_IMPL(fs_event_watch_file_current_dir) { loop = uv_default_loop(); /* Setup */ - remove("watch_file"); + delete_file("watch_file"); create_file("watch_file"); #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_12) /* Empirically, kevent seems to (sometimes) report the preceding @@ -728,7 +795,7 @@ TEST_IMPL(fs_event_watch_file_current_dir) { ASSERT_EQ(1, close_cb_called); /* Cleanup */ - remove("watch_file"); + delete_file("watch_file"); MAKE_VALGRIND_HAPPY(loop); return 0; @@ -770,8 +837,8 @@ TEST_IMPL(fs_event_no_callback_after_close) { int r; /* Setup */ - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); create_dir("watch_dir"); create_file("watch_dir/file1"); @@ -792,8 +859,8 @@ TEST_IMPL(fs_event_no_callback_after_close) { ASSERT_EQ(1, close_cb_called); /* Cleanup */ - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 0; @@ -808,8 +875,8 @@ TEST_IMPL(fs_event_no_callback_on_close) { int r; /* Setup */ - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); create_dir("watch_dir"); create_file("watch_dir/file1"); @@ -829,8 +896,8 @@ TEST_IMPL(fs_event_no_callback_on_close) { ASSERT_EQ(1, close_cb_called); /* Cleanup */ - remove("watch_dir/file1"); - remove("watch_dir/"); + delete_file("watch_dir/file1"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 0; @@ -902,8 +969,8 @@ TEST_IMPL(fs_event_close_with_pending_event) { ASSERT_EQ(1, close_cb_called); /* Clean up */ - remove("watch_dir/file"); - remove("watch_dir/"); + delete_file("watch_dir/file"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 0; @@ -927,7 +994,7 @@ TEST_IMPL(fs_event_close_with_pending_delete_event) { ASSERT_OK(r); /* Generate an fs event. */ - remove("watch_dir/file"); + delete_file("watch_dir/file"); /* Allow time for the remove event to propagate to the pending list. */ /* XXX - perhaps just for __sun? 
*/ @@ -941,7 +1008,7 @@ TEST_IMPL(fs_event_close_with_pending_delete_event) { ASSERT_EQ(1, close_cb_called); /* Clean up */ - remove("watch_dir/"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 0; @@ -984,7 +1051,7 @@ TEST_IMPL(fs_event_close_in_callback) { /* Clean up */ fs_event_unlink_files(NULL); - remove("watch_dir/"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 0; @@ -1020,7 +1087,7 @@ TEST_IMPL(fs_event_start_and_close) { ASSERT_EQ(2, close_cb_called); - remove("watch_dir/"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1073,111 +1140,11 @@ TEST_IMPL(fs_event_getpath) { close_cb_called = 0; } - remove("watch_dir/"); + delete_dir("watch_dir/"); MAKE_VALGRIND_HAPPY(loop); return 0; } -#if defined(__APPLE__) - -static int fs_event_error_reported; - -static void fs_event_error_report_cb(uv_fs_event_t* handle, - const char* filename, - int events, - int status) { - if (status != 0) - fs_event_error_reported = status; -} - -static void timer_cb_nop(uv_timer_t* handle) { - ++timer_cb_called; - uv_close((uv_handle_t*) handle, close_cb); -} - -static void fs_event_error_report_close_cb(uv_handle_t* handle) { - ASSERT_NOT_NULL(handle); - close_cb_called++; - - /* handle is allocated on-stack, no need to free it */ -} - - -TEST_IMPL(fs_event_error_reporting) { - unsigned int i; - uv_loop_t loops[1024]; - uv_fs_event_t events[ARRAY_SIZE(loops)]; - uv_loop_t* loop; - uv_fs_event_t* event; - - TEST_FILE_LIMIT(ARRAY_SIZE(loops) * 3); - - remove("watch_dir/"); - create_dir("watch_dir"); - - /* Create a lot of loops, and start FSEventStream in each of them. - * Eventually, this should create enough streams to make FSEventStreamStart() - * fail. - */ - for (i = 0; i < ARRAY_SIZE(loops); i++) { - loop = &loops[i]; - ASSERT_OK(uv_loop_init(loop)); - event = &events[i]; - - timer_cb_called = 0; - close_cb_called = 0; - ASSERT_OK(uv_fs_event_init(loop, event)); - ASSERT_OK(uv_fs_event_start(event, - fs_event_error_report_cb, - "watch_dir", - 0)); - uv_unref((uv_handle_t*) event); - - /* Let loop run for some time */ - ASSERT_OK(uv_timer_init(loop, &timer)); - ASSERT_OK(uv_timer_start(&timer, timer_cb_nop, 2, 0)); - uv_run(loop, UV_RUN_DEFAULT); - ASSERT_EQ(1, timer_cb_called); - ASSERT_EQ(1, close_cb_called); - if (fs_event_error_reported != 0) - break; - } - - /* At least one loop should fail */ - ASSERT_EQ(fs_event_error_reported, UV_EMFILE); - - /* Stop and close all events, and destroy loops */ - do { - loop = &loops[i]; - event = &events[i]; - - ASSERT_OK(uv_fs_event_stop(event)); - uv_ref((uv_handle_t*) event); - uv_close((uv_handle_t*) event, fs_event_error_report_close_cb); - - close_cb_called = 0; - uv_run(loop, UV_RUN_DEFAULT); - ASSERT_EQ(1, close_cb_called); - - uv_loop_close(loop); - } while (i-- != 0); - - remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(uv_default_loop()); - return 0; -} - -#else /* !defined(__APPLE__) */ - -TEST_IMPL(fs_event_error_reporting) { - /* No-op, needed only for FSEvents backend */ - - MAKE_VALGRIND_HAPPY(uv_default_loop()); - return 0; -} - -#endif /* defined(__APPLE__) */ - TEST_IMPL(fs_event_watch_invalid_path) { #if defined(NO_FS_EVENTS) RETURN_SKIP(NO_FS_EVENTS); @@ -1216,7 +1183,7 @@ TEST_IMPL(fs_event_stop_in_cb) { RETURN_SKIP(NO_FS_EVENTS); #endif - remove(path); + delete_file(path); create_file(path); ASSERT_OK(uv_fs_event_init(uv_default_loop(), &fs)); @@ -1239,7 +1206,7 @@ TEST_IMPL(fs_event_stop_in_cb) { ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT)); ASSERT_EQ(1, fs_event_cb_stop_calls); - 
remove(path); + delete_file(path); MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; diff --git a/deps/uv/test/test-fs-readdir.c b/deps/uv/test/test-fs-readdir.c index 0f2b4afa58c589..bacea653587c51 100644 --- a/deps/uv/test/test-fs-readdir.c +++ b/deps/uv/test/test-fs-readdir.c @@ -29,6 +29,7 @@ static uv_fs_t readdir_req; static uv_fs_t closedir_req; static uv_dirent_t dirents[1]; +static uv_dirent_t symlink_dirents[2]; static int empty_opendir_cb_count; static int empty_closedir_cb_count; @@ -460,3 +461,88 @@ TEST_IMPL(fs_readdir_non_empty_dir) { MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } + +static void readdir_symlink_readdir_cb(uv_fs_t* req) { + uv_dir_t* dir; + + ASSERT_PTR_EQ(req, &readdir_req); + ASSERT_EQ(req->fs_type, UV_FS_READDIR); + dir = req->ptr; + + if (req->result == 0) { + uv_fs_req_cleanup(req); + ASSERT_EQ(3, non_empty_readdir_cb_count); + uv_fs_closedir(uv_default_loop(), + &closedir_req, + dir, + non_empty_closedir_cb); + } else { + if (strcmp(symlink_dirents[0].name, "test_symlink") == 0) { + ASSERT_EQ(symlink_dirents[0].type, UV_DIRENT_LINK); + } else { + ASSERT_EQ(symlink_dirents[1].type, UV_DIRENT_LINK); + } + uv_fs_req_cleanup(req); + } +} + +static void readdir_symlink_opendir_cb(uv_fs_t* req) { + uv_dir_t* dir; + int r; + + ASSERT_PTR_EQ(req, &opendir_req); + ASSERT_EQ(req->fs_type, UV_FS_OPENDIR); + ASSERT_OK(req->result); + ASSERT_NOT_NULL(req->ptr); + + dir = req->ptr; + dir->dirents = symlink_dirents; + dir->nentries = ARRAY_SIZE(symlink_dirents); + + r = uv_fs_readdir(uv_default_loop(), + &readdir_req, + dir, + readdir_symlink_readdir_cb); + ASSERT_OK(r); + uv_fs_req_cleanup(req); +} + +static void cleanup_symlink_test_files(void) { + uv_fs_t req; + + uv_fs_rmdir(NULL, &req, "test_symlink_dir/test_subdir", NULL); + uv_fs_req_cleanup(&req); + uv_fs_unlink(NULL, &req, "test_symlink_dir/test_symlink", NULL); + uv_fs_req_cleanup(&req); + uv_fs_rmdir(NULL, &req, "test_symlink_dir", NULL); + uv_fs_req_cleanup(&req); +} + +TEST_IMPL(fs_readdir_symlink) { + + uv_fs_t mkdir_req; + uv_fs_t symlink_req; + int r; + + cleanup_symlink_test_files(); + + r = uv_fs_mkdir(uv_default_loop(), &mkdir_req, "test_symlink_dir", 0755, NULL); + ASSERT_OK(r); + + r = uv_fs_mkdir(uv_default_loop(), &mkdir_req, "test_symlink_dir/test_subdir", 0755, NULL); + ASSERT_OK(r); + + r = uv_fs_symlink(uv_default_loop(), &symlink_req, "test_symlink_dir/test_subdir", "test_symlink_dir/test_symlink", UV_FS_SYMLINK_DIR, NULL); + ASSERT_OK(r); + + r = uv_fs_opendir(uv_default_loop(), &opendir_req, "test_symlink_dir", readdir_symlink_opendir_cb); + ASSERT_OK(r); + + r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); + ASSERT_OK(r); + + cleanup_symlink_test_files(); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +} diff --git a/deps/uv/test/test-fs.c b/deps/uv/test/test-fs.c index fe78117bbeddc0..ff0f9fc89a2d1b 100644 --- a/deps/uv/test/test-fs.c +++ b/deps/uv/test/test-fs.c @@ -104,6 +104,7 @@ static uv_loop_t* loop; static uv_fs_t open_req1; static uv_fs_t open_req2; +static uv_fs_t open_req_noclose; static uv_fs_t read_req; static uv_fs_t write_req; static uv_fs_t unlink_req; @@ -304,7 +305,7 @@ static void chown_root_cb(uv_fs_t* req) { ASSERT_EQ(req->result, UV_EINVAL); # elif defined(__PASE__) /* On IBMi PASE, there is no root user. uid 0 is user qsecofr. - * User may grant qsecofr's privileges, including changing + * User may grant qsecofr's privileges, including changing * the file's ownership to uid 0. 
*/ ASSERT(req->result == 0 || req->result == UV_EPERM); @@ -1067,6 +1068,50 @@ TEST_IMPL(fs_file_sync) { return 0; } +TEST_IMPL(fs_posix_delete) { + int r; + + /* Setup. */ + unlink("test_dir/file"); + rmdir("test_dir"); + + r = uv_fs_mkdir(NULL, &mkdir_req, "test_dir", 0755, NULL); + ASSERT_OK(r); + + r = uv_fs_open(NULL, &open_req_noclose, "test_dir/file", UV_FS_O_WRONLY | UV_FS_O_CREAT, S_IWUSR | S_IRUSR, NULL); + ASSERT_GE(r, 0); + uv_fs_req_cleanup(&open_req_noclose); + + /* should not be possible to delete the non-empty dir */ + r = uv_fs_rmdir(NULL, &rmdir_req, "test_dir", NULL); + ASSERT((r == UV_ENOTEMPTY) || (r == UV_EEXIST)); + ASSERT_EQ(r, rmdir_req.result); + uv_fs_req_cleanup(&rmdir_req); + + r = uv_fs_rmdir(NULL, &rmdir_req, "test_dir/file", NULL); + ASSERT((r == UV_ENOTDIR) || (r == UV_ENOENT)); + ASSERT_EQ(r, rmdir_req.result); + uv_fs_req_cleanup(&rmdir_req); + + r = uv_fs_unlink(NULL, &unlink_req, "test_dir/file", NULL); + ASSERT_OK(r); + ASSERT_OK(unlink_req.result); + uv_fs_req_cleanup(&unlink_req); + + /* delete the dir while the file is still open, which should succeed on posix */ + r = uv_fs_rmdir(NULL, &rmdir_req, "test_dir", NULL); + ASSERT_OK(r); + ASSERT_OK(rmdir_req.result); + uv_fs_req_cleanup(&rmdir_req); + + /* Cleanup */ + r = uv_fs_close(NULL, &close_req, open_req_noclose.result, NULL); + ASSERT_OK(r); + uv_fs_req_cleanup(&close_req); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +} static void fs_file_write_null_buffer(int add_flags) { int r; diff --git a/deps/uv/test/test-homedir.c b/deps/uv/test/test-homedir.c index 769d5c8179fc77..e335540d106dec 100644 --- a/deps/uv/test/test-homedir.c +++ b/deps/uv/test/test-homedir.c @@ -68,5 +68,14 @@ TEST_IMPL(homedir) { r = uv_os_homedir(homedir, &len); ASSERT_EQ(r, UV_EINVAL); +#ifdef _WIN32 + /* Test empty environment variable */ + r = uv_os_setenv("USERPROFILE", ""); + ASSERT_EQ(r, 0); + len = sizeof homedir; + r = uv_os_homedir(homedir, &len); + ASSERT_EQ(r, UV_ENOENT); +#endif + return 0; } diff --git a/deps/uv/test/test-iouring-pollhup.c b/deps/uv/test/test-iouring-pollhup.c new file mode 100644 index 00000000000000..342789aa7d9726 --- /dev/null +++ b/deps/uv/test/test-iouring-pollhup.c @@ -0,0 +1,111 @@ +/* Copyright libuv project contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "task.h" + +#ifdef _WIN32 + +TEST_IMPL(iouring_pollhup) { + RETURN_SKIP("Not on Windows."); +} + +#else /* !_WIN32 */ + +#include /* close() */ + +static uv_pipe_t p1; +static uv_pipe_t p2; +static uv_idle_t idle_handle; +static int iters; +static int duped_fd; +static int newpipefds[2]; + +static void alloc_buffer(uv_handle_t* handle, + size_t suggested_size, + uv_buf_t* buf) { + static char slab[32]; + *buf = uv_buf_init(slab, sizeof(slab)); +} + +static void read_data2(uv_stream_t* stream, + ssize_t nread, + const uv_buf_t* buf) { + if (nread < 0) { + ASSERT_EQ(nread, UV_EOF); + ASSERT_OK(close(duped_fd)); + duped_fd = -1; + uv_close((uv_handle_t*) &p2, NULL); + uv_close((uv_handle_t*) &idle_handle, NULL); + } else { + /* If nread == 0 is because of POLLHUP received still from pipefds[0] file + * description which is still referenced in duped_fd. It should not happen + * if close(p1) was called after EPOLL_CTL_DEL. + */ + ASSERT_GT(nread, 0); + } +} + +static void idle_cb(uv_idle_t* handle) { + if (++iters == 1) { + ASSERT_OK(uv_pipe_open(&p2, newpipefds[0])); + ASSERT_OK(uv_read_start((uv_stream_t*) &p2, alloc_buffer, read_data2)); + } else { + ASSERT_OK(uv_idle_stop(handle)); + ASSERT_OK(close(newpipefds[1])); + newpipefds[1] = -1; + } +} + +static void read_data(uv_stream_t* stream, + ssize_t nread, + const uv_buf_t* buf) { + ASSERT_EQ(nread, UV_EOF); + uv_close((uv_handle_t*) stream, NULL); + ASSERT_OK(uv_idle_start(&idle_handle, idle_cb)); +} + +TEST_IMPL(iouring_pollhup) { + uv_loop_t* loop; + int pipefds[2]; + + loop = uv_default_loop(); + ASSERT_OK(uv_pipe_init(loop, &p1, 0)); + ASSERT_OK(uv_pipe_init(loop, &p2, 0)); + ASSERT_OK(uv_idle_init(loop, &idle_handle)); + ASSERT_OK(pipe(pipefds)); + ASSERT_OK(pipe(newpipefds)); + + ASSERT_OK(uv_pipe_open(&p1, pipefds[0])); + duped_fd = dup(pipefds[0]); + ASSERT_NE(duped_fd, -1); + + ASSERT_OK(uv_read_start((uv_stream_t*) &p1, alloc_buffer, read_data)); + ASSERT_OK(close(pipefds[1])); /* Close write end, generate POLLHUP. 
*/ + pipefds[1] = -1; + + ASSERT_OK(uv_run(loop, UV_RUN_DEFAULT)); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +} + +#endif /* !_WIN32 */ diff --git a/deps/uv/test/test-list.h b/deps/uv/test/test-list.h index d30f02faa8515c..e07bd61ecf73c1 100644 --- a/deps/uv/test/test-list.h +++ b/deps/uv/test/test-list.h @@ -153,6 +153,7 @@ TEST_DECLARE (tcp_write_to_half_open_connection) TEST_DECLARE (tcp_unexpected_read) TEST_DECLARE (tcp_read_stop) TEST_DECLARE (tcp_read_stop_start) +TEST_DECLARE (tcp_reuseport) TEST_DECLARE (tcp_rst) TEST_DECLARE (tcp_bind6_error_addrinuse) TEST_DECLARE (tcp_bind6_error_addrnotavail) @@ -189,6 +190,7 @@ TEST_DECLARE (udp_open_twice) TEST_DECLARE (udp_open_bound) TEST_DECLARE (udp_open_connect) TEST_DECLARE (udp_recv_in_a_row) +TEST_DECLARE (udp_reuseport) #ifndef _WIN32 TEST_DECLARE (udp_send_unix) #endif @@ -207,6 +209,7 @@ TEST_DECLARE (pipe_connect_to_file) TEST_DECLARE (pipe_connect_on_prepare) TEST_DECLARE (pipe_getsockname) TEST_DECLARE (pipe_getsockname_abstract) +TEST_DECLARE (pipe_getsockname_autobind) TEST_DECLARE (pipe_getsockname_blocking) TEST_DECLARE (pipe_pending_instances) TEST_DECLARE (pipe_sendmsg) @@ -352,6 +355,7 @@ TEST_DECLARE (fs_file_nametoolong) TEST_DECLARE (fs_file_loop) TEST_DECLARE (fs_file_async) TEST_DECLARE (fs_file_sync) +TEST_DECLARE (fs_posix_delete) TEST_DECLARE (fs_file_write_null_buffer) TEST_DECLARE (fs_async_dir) TEST_DECLARE (fs_async_sendfile) @@ -393,6 +397,7 @@ TEST_DECLARE (fs_stat_missing_path) TEST_DECLARE (fs_read_bufs) TEST_DECLARE (fs_read_file_eof) TEST_DECLARE (fs_event_watch_dir) +TEST_DECLARE (fs_event_watch_delete_dir) TEST_DECLARE (fs_event_watch_dir_recursive) #ifdef _WIN32 TEST_DECLARE (fs_event_watch_dir_short_path) @@ -412,7 +417,6 @@ TEST_DECLARE (fs_event_close_with_pending_event) TEST_DECLARE (fs_event_close_with_pending_delete_event) TEST_DECLARE (fs_event_close_in_callback) TEST_DECLARE (fs_event_start_and_close) -TEST_DECLARE (fs_event_error_reporting) TEST_DECLARE (fs_event_getpath) TEST_DECLARE (fs_event_stop_in_cb) TEST_DECLARE (fs_scandir_empty_dir) @@ -424,6 +428,9 @@ TEST_DECLARE (fs_readdir_empty_dir) TEST_DECLARE (fs_readdir_file) TEST_DECLARE (fs_readdir_non_empty_dir) TEST_DECLARE (fs_readdir_non_existing_dir) +#ifdef _WIN32 +TEST_DECLARE (fs_readdir_symlink) +#endif TEST_DECLARE (fs_rename_to_existing_file) TEST_DECLARE (fs_write_multiple_bufs) TEST_DECLARE (fs_read_write_null_arguments) @@ -563,6 +570,8 @@ TEST_DECLARE (fork_threadpool_queue_work_simple) #endif #endif +TEST_DECLARE (iouring_pollhup) + TEST_DECLARE (idna_toascii) TEST_DECLARE (utf8_decode1) TEST_DECLARE (utf8_decode1_overrun) @@ -763,6 +772,8 @@ TASK_LIST_START TEST_ENTRY (tcp_read_stop_start) + TEST_ENTRY (tcp_reuseport) + TEST_ENTRY (tcp_rst) TEST_HELPER (tcp_rst, tcp4_echo_server) @@ -799,6 +810,7 @@ TASK_LIST_START TEST_ENTRY (udp_sendmmsg_error) TEST_ENTRY (udp_try_send) TEST_ENTRY (udp_recv_in_a_row) + TEST_ENTRY (udp_reuseport) TEST_ENTRY (udp_open) TEST_ENTRY (udp_open_twice) @@ -818,6 +830,7 @@ TASK_LIST_START TEST_ENTRY (pipe_overlong_path) TEST_ENTRY (pipe_getsockname) TEST_ENTRY (pipe_getsockname_abstract) + TEST_ENTRY (pipe_getsockname_autobind) TEST_ENTRY (pipe_getsockname_blocking) TEST_ENTRY (pipe_pending_instances) TEST_ENTRY (pipe_sendmsg) @@ -1055,6 +1068,7 @@ TASK_LIST_START TEST_ENTRY (fs_file_loop) TEST_ENTRY (fs_file_async) TEST_ENTRY (fs_file_sync) + TEST_ENTRY (fs_posix_delete) TEST_ENTRY (fs_file_write_null_buffer) TEST_ENTRY (fs_async_dir) TEST_ENTRY (fs_async_sendfile) @@ -1096,6 
+1110,7 @@ TASK_LIST_START TEST_ENTRY (fs_read_file_eof) TEST_ENTRY (fs_file_open_append) TEST_ENTRY (fs_event_watch_dir) + TEST_ENTRY (fs_event_watch_delete_dir) TEST_ENTRY (fs_event_watch_dir_recursive) #ifdef _WIN32 TEST_ENTRY (fs_event_watch_dir_short_path) @@ -1115,7 +1130,6 @@ TASK_LIST_START TEST_ENTRY (fs_event_close_with_pending_delete_event) TEST_ENTRY (fs_event_close_in_callback) TEST_ENTRY (fs_event_start_and_close) - TEST_ENTRY_CUSTOM (fs_event_error_reporting, 0, 0, 60000) TEST_ENTRY (fs_event_getpath) TEST_ENTRY (fs_event_stop_in_cb) TEST_ENTRY (fs_scandir_empty_dir) @@ -1127,6 +1141,9 @@ TASK_LIST_START TEST_ENTRY (fs_readdir_file) TEST_ENTRY (fs_readdir_non_empty_dir) TEST_ENTRY (fs_readdir_non_existing_dir) +#ifdef _WIN32 + TEST_ENTRY (fs_readdir_symlink) +#endif TEST_ENTRY (fs_rename_to_existing_file) TEST_ENTRY (fs_write_multiple_bufs) TEST_ENTRY (fs_write_alotof_bufs) @@ -1204,6 +1221,8 @@ TASK_LIST_START #endif #endif + TEST_ENTRY (iouring_pollhup) + TEST_ENTRY (utf8_decode1) TEST_ENTRY (utf8_decode1_overrun) TEST_ENTRY (uname) diff --git a/deps/uv/test/test-pipe-bind-error.c b/deps/uv/test/test-pipe-bind-error.c index 412f23aedb0a01..16164a7ee9034b 100644 --- a/deps/uv/test/test-pipe-bind-error.c +++ b/deps/uv/test/test-pipe-bind-error.c @@ -172,19 +172,36 @@ TEST_IMPL(pipe_overlong_path) { #ifndef _WIN32 char path[512]; memset(path, '@', sizeof(path)); + + /* On most platforms sun_path is smaller than the NAME_MAX + * Though there is nothing in the POSIX spec that says it needs to be. + * POSIX allows PATH_MAX length paths in saddr.sun_path BUT individual + * components of the path can only be NAME_MAX long. + * So in this case we end up with UV_ENAMETOOLONG error rather than + * UV_EINVAL. + * ref: https://github.com/libuv/libuv/issues/4231#issuecomment-2194612711 + * On AIX the sun_path is larger than the NAME_MAX + */ +#if defined(_AIX) && !defined(__PASE__) + ASSERT_EQ(UV_ENAMETOOLONG, + uv_pipe_bind2(&pipe, path, sizeof(path), UV_PIPE_NO_TRUNCATE)); + /* UV_ENAMETOOLONG is delayed in uv_pipe_connect2 and won't propagate until + * uv_run is called and causes timeouts, therefore in this case we skip calling + * uv_pipe_connect2 + */ +#else ASSERT_EQ(UV_EINVAL, - uv_pipe_bind2(&pipe, path, sizeof(path), UV_PIPE_NO_TRUNCATE)); + uv_pipe_bind2(&pipe, path, sizeof(path), UV_PIPE_NO_TRUNCATE)); ASSERT_EQ(UV_EINVAL, - uv_pipe_connect2(&req, - &pipe, - path, - sizeof(path), - UV_PIPE_NO_TRUNCATE, - (uv_connect_cb) abort)); + uv_pipe_connect2(&req, + &pipe, + path, + sizeof(path), + UV_PIPE_NO_TRUNCATE, + (uv_connect_cb) abort)); ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT)); -#endif - - ASSERT_EQ(UV_EINVAL, uv_pipe_bind(&pipe, "")); +#endif /*if defined(_AIX) && !defined(__PASE__)*/ +#endif /* ifndef _WIN32 */ uv_pipe_connect(&req, &pipe, "", @@ -195,5 +212,4 @@ TEST_IMPL(pipe_overlong_path) { MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; - } diff --git a/deps/uv/test/test-pipe-getsockname.c b/deps/uv/test/test-pipe-getsockname.c index d76b6ad4917021..34b572343c698a 100644 --- a/deps/uv/test/test-pipe-getsockname.c +++ b/deps/uv/test/test-pipe-getsockname.c @@ -78,6 +78,36 @@ static void pipe_client_connect_cb(uv_connect_t* req, int status) { } +#if defined(__linux__) +/* Socket name looks like \0[0-9a-f]{5}, e.g. 
"\0bad42" */ +static void check_is_autobind_abstract_socket_name(const char *p, size_t len) { + ASSERT_EQ(len, 6); + ASSERT_EQ(*p, '\0'); + + while (*p != '\0') { + ASSERT((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')); + p++; + } +} + + +static void pipe_client_autobind_connect_cb(uv_connect_t* req, int status) { + char buf[16]; + size_t len; + + ASSERT_OK(status); + len = 5; + ASSERT_EQ(UV_ENOBUFS, uv_pipe_getpeername(&pipe_client, buf, &len)); + len = 6; + ASSERT_OK(uv_pipe_getpeername(&pipe_client, buf, &len)); + check_is_autobind_abstract_socket_name(buf, len); + pipe_client_connect_cb_called++; + uv_close((uv_handle_t*) &pipe_client, pipe_close_cb); + uv_close((uv_handle_t*) &pipe_server, pipe_close_cb); +} +#endif /* defined(__linux__) */ + + static void pipe_server_connection_cb(uv_stream_t* handle, int status) { /* This function *may* be called, depending on whether accept or the * connection callback is called first. @@ -124,9 +154,11 @@ TEST_IMPL(pipe_getsockname) { ASSERT_STR_EQ(pipe_server.pipe_fname, TEST_PIPENAME); #endif - len = sizeof buf; - r = uv_pipe_getsockname(&pipe_server, buf, &len); - ASSERT_OK(r); + len = sizeof(TEST_PIPENAME) - 1; + ASSERT_EQ(UV_ENOBUFS, uv_pipe_getsockname(&pipe_server, buf, &len)); + + len = sizeof(TEST_PIPENAME); + ASSERT_OK(uv_pipe_getsockname(&pipe_server, buf, &len)); ASSERT_NE(0, buf[len - 1]); ASSERT_EQ(buf[len], '\0'); @@ -160,7 +192,8 @@ TEST_IMPL(pipe_getsockname) { len = sizeof buf; r = uv_pipe_getsockname(&pipe_client, buf, &len); - ASSERT(r == 0 && len == 0); + ASSERT_EQ(r, 0); + ASSERT_EQ(len, 0); len = sizeof buf; r = uv_pipe_getpeername(&pipe_client, buf, &len); @@ -228,6 +261,44 @@ TEST_IMPL(pipe_getsockname_abstract) { #endif } + +TEST_IMPL(pipe_getsockname_autobind) { +#if defined(__linux__) + char buf[256]; + size_t buflen; + + buflen = sizeof(buf); + memset(buf, 0, sizeof(buf)); + ASSERT_OK(uv_pipe_init(uv_default_loop(), &pipe_server, 0)); + ASSERT_OK(uv_pipe_bind2(&pipe_server, "", 0, 0)); + ASSERT_OK(uv_pipe_getsockname(&pipe_server, buf, &buflen)); + check_is_autobind_abstract_socket_name(buf, buflen); + ASSERT_OK(uv_listen((uv_stream_t*) &pipe_server, 0, + pipe_server_connection_cb)); + ASSERT_OK(uv_pipe_init(uv_default_loop(), &pipe_client, 0)); + ASSERT_OK(uv_pipe_connect2(&connect_req, &pipe_client, + buf, + 1 + strlen(&buf[1]), + 0, + pipe_client_autobind_connect_cb)); + ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT)); + ASSERT_EQ(1, pipe_client_connect_cb_called); + ASSERT_EQ(2, pipe_close_cb_called); + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +#else + /* On other platforms it should simply fail with UV_EINVAL. 
*/ + ASSERT_OK(uv_pipe_init(uv_default_loop(), &pipe_server, 0)); + ASSERT_EQ(UV_EINVAL, uv_pipe_bind2(&pipe_server, "", 0, 0)); + uv_close((uv_handle_t*) &pipe_server, pipe_close_cb); + ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT)); + ASSERT_EQ(1, pipe_close_cb_called); + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +#endif +} + + TEST_IMPL(pipe_getsockname_blocking) { #ifdef _WIN32 HANDLE readh, writeh; diff --git a/deps/uv/test/test-platform-output.c b/deps/uv/test/test-platform-output.c index f18e097f913607..4e5300da037399 100644 --- a/deps/uv/test/test-platform-output.c +++ b/deps/uv/test/test-platform-output.c @@ -90,6 +90,45 @@ TEST_IMPL(platform_output) { ASSERT_GE(par, 1); printf("uv_available_parallelism: %u\n", par); +#ifdef __linux__ + FILE* file; + int cgroup_version = 0; + unsigned int cgroup_par = 0; + uint64_t quota, period; + + // Attempt to parse cgroup v2 to deduce parallelism constraints + file = fopen("/sys/fs/cgroup/cpu.max", "r"); + if (file) { + if (fscanf(file, "%lu %lu", "a, &period) == 2 && quota > 0) { + cgroup_version = 2; + cgroup_par = (unsigned int)(quota / period); + } + fclose(file); + } + + // If cgroup v2 wasn't present, try parsing cgroup v1 + if (cgroup_version == 0) { + file = fopen("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us", "r"); + if (file) { + if (fscanf(file, "%lu", "a) == 1 && quota > 0 && quota < ~0ULL) { + fclose(file); + file = fopen("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us", "r"); + if (file && fscanf(file, "%lu", &period) == 1) { + cgroup_version = 1; + cgroup_par = (unsigned int)(quota / period); + } + } + if (file) fclose(file); + } + } + + // If we found cgroup parallelism constraints, assert and print them + if (cgroup_par > 0) { + ASSERT_GE(par, cgroup_par); + printf("cgroup v%d available parallelism: %u\n", cgroup_version, cgroup_par); + } +#endif + err = uv_cpu_info(&cpus, &count); #if defined(__CYGWIN__) || defined(__MSYS__) ASSERT_EQ(err, UV_ENOSYS); diff --git a/deps/uv/test/test-poll.c b/deps/uv/test/test-poll.c index fcd644f2c2563b..5161de253768d8 100644 --- a/deps/uv/test/test-poll.c +++ b/deps/uv/test/test-poll.c @@ -626,26 +626,59 @@ TEST_IMPL(poll_unidirectional) { /* Windows won't let you open a directory so we open a file instead. - * OS X lets you poll a file so open the $PWD instead. Both fail - * on Linux so it doesn't matter which one we pick. Both succeed - * on FreeBSD, Solaris and AIX so skip the test on those platforms. + * OS X lets you poll a file so open the $PWD instead. Both fail + * on Linux so it doesn't matter which one we pick. Both succeed + * on Solaris and AIX so skip the test on those platforms. + * On *BSD/Darwin, we disallow polling of regular files, directories. + * In addition to regular files, we also disallow FIFOs on Darwin. 
*/ +#ifdef __APPLE__ +#define TEST_POLL_FIFO_PATH "/tmp/uv-test-poll-fifo" +#endif TEST_IMPL(poll_bad_fdtype) { -#if !defined(__DragonFly__) && !defined(__FreeBSD__) && !defined(__sun) && \ +#if !defined(__sun) && \ !defined(_AIX) && !defined(__MVS__) && \ - !defined(__OpenBSD__) && !defined(__CYGWIN__) && !defined(__MSYS__) && \ - !defined(__NetBSD__) + !defined(__CYGWIN__) && !defined(__MSYS__) uv_poll_t poll_handle; - int fd; + int fd[2]; #if defined(_WIN32) - fd = _open("test/fixtures/empty_file", UV_FS_O_RDONLY); + fd[0] = _open("test/fixtures/empty_file", UV_FS_O_RDONLY); #else - fd = open(".", UV_FS_O_RDONLY); + fd[0] = open(".", UV_FS_O_RDONLY); +#endif + ASSERT_NE(fd[0], -1); + ASSERT_NE(0, uv_poll_init(uv_default_loop(), &poll_handle, fd[0])); + ASSERT_OK(close(fd[0])); +#if defined(__APPLE__) || \ + defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__OpenBSD__) || \ + defined(__NetBSD__) + fd[0] = open("test/fixtures/empty_file", UV_FS_O_RDONLY); + ASSERT_NE(fd[0], -1); + /* Regular files should be banned from kqueue. */ + ASSERT_NE(0, uv_poll_init(uv_default_loop(), &poll_handle, fd[0])); + ASSERT_OK(close(fd[0])); +#ifdef __APPLE__ + ASSERT_OK(pipe(fd)); + /* Pipes should be permitted in kqueue. */ + ASSERT_EQ(0, uv_poll_init(uv_default_loop(), &poll_handle, fd[0])); + ASSERT_OK(close(fd[0])); + ASSERT_OK(close(fd[1])); + + ASSERT_OK(mkfifo(TEST_POLL_FIFO_PATH, 0600)); + fd[0] = open(TEST_POLL_FIFO_PATH, O_RDONLY | O_NONBLOCK); + ASSERT_NE(fd[0], -1); + fd[1] = open(TEST_POLL_FIFO_PATH, O_WRONLY | O_NONBLOCK); + ASSERT_NE(fd[1], -1); + /* FIFOs should be banned from kqueue. */ + ASSERT_NE(0, uv_poll_init(uv_default_loop(), &poll_handle, fd[0])); + ASSERT_OK(close(fd[0])); + ASSERT_OK(close(fd[1])); + unlink(TEST_POLL_FIFO_PATH); +#endif #endif - ASSERT_NE(fd, -1); - ASSERT_NE(0, uv_poll_init(uv_default_loop(), &poll_handle, fd)); - ASSERT_OK(close(fd)); #endif MAKE_VALGRIND_HAPPY(uv_default_loop()); diff --git a/deps/uv/test/test-ref.c b/deps/uv/test/test-ref.c index dbe94f7168b18d..7a2c33790ab177 100644 --- a/deps/uv/test/test-ref.c +++ b/deps/uv/test/test-ref.c @@ -63,7 +63,7 @@ static void fail_cb2(void) { } -static void req_cb(uv_handle_t* req, int status) { +static void req_cb(uv_udp_send_t* req, int status) { req_cb_called++; } @@ -334,7 +334,7 @@ TEST_IMPL(udp_ref3) { &buf, 1, (const struct sockaddr*) &addr, - (uv_udp_send_cb) req_cb); + req_cb); uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT_EQ(1, req_cb_called); diff --git a/deps/uv/test/test-spawn.c b/deps/uv/test/test-spawn.c index 6a848747036405..efbb2395ff8b2b 100644 --- a/deps/uv/test/test-spawn.c +++ b/deps/uv/test/test-spawn.c @@ -1054,7 +1054,7 @@ TEST_IMPL(kill) { sigaddset(&set, SIGTERM); ASSERT_OK(pthread_sigmask(SIG_BLOCK, &set, NULL)); } - ASSERT_NE(SIG_ERR, signal(SIGTERM, SIG_IGN)); + ASSERT_PTR_NE(SIG_ERR, signal(SIGTERM, SIG_IGN)); #endif r = uv_spawn(uv_default_loop(), &process, &options); @@ -1067,7 +1067,7 @@ TEST_IMPL(kill) { sigaddset(&set, SIGTERM); ASSERT_OK(pthread_sigmask(SIG_UNBLOCK, &set, NULL)); } - ASSERT_NE(SIG_ERR, signal(SIGTERM, SIG_DFL)); + ASSERT_PTR_NE(SIG_ERR, signal(SIGTERM, SIG_DFL)); #endif /* Sending signum == 0 should check if the diff --git a/deps/uv/test/test-tcp-flags.c b/deps/uv/test/test-tcp-flags.c index 30178d706d9418..16218a27f0a3a8 100644 --- a/deps/uv/test/test-tcp-flags.c +++ b/deps/uv/test/test-tcp-flags.c @@ -33,7 +33,8 @@ TEST_IMPL(tcp_flags) { loop = uv_default_loop(); - r = uv_tcp_init(loop, &handle); + /* Use _ex 
to make sure the socket is created. */ + r = uv_tcp_init_ex(loop, &handle, AF_INET); ASSERT_OK(r); r = uv_tcp_nodelay(&handle, 1); @@ -42,6 +43,12 @@ TEST_IMPL(tcp_flags) { r = uv_tcp_keepalive(&handle, 1, 60); ASSERT_OK(r); + r = uv_tcp_keepalive(&handle, 0, 0); + ASSERT_OK(r); + + r = uv_tcp_keepalive(&handle, 1, 0); + ASSERT_EQ(r, UV_EINVAL); + uv_close((uv_handle_t*)&handle, NULL); r = uv_run(loop, UV_RUN_DEFAULT); diff --git a/deps/uv/test/test-tcp-reuseport.c b/deps/uv/test/test-tcp-reuseport.c new file mode 100644 index 00000000000000..f108b9bbe0fe75 --- /dev/null +++ b/deps/uv/test/test-tcp-reuseport.c @@ -0,0 +1,248 @@ +/* Copyright libuv project contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
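The tcp_flags changes switch the test to `uv_tcp_init_ex()` so a socket exists before any socket-option calls, and they pin down the new argument checking in `uv_tcp_keepalive()`: disabling ignores the delay, while enabling with a zero delay is rejected with `UV_EINVAL`. A minimal usage sketch of those calls, with error handling trimmed for brevity:

```c
/* Sketch: exercising uv_tcp_keepalive() the way the test above does.
 * uv_tcp_init_ex() creates the socket immediately, so socket-option
 * calls work before bind() or connect(). */
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_tcp_t handle;
  int r;

  r = uv_tcp_init_ex(loop, &handle, AF_INET);
  if (r != 0)
    return 1;

  r = uv_tcp_keepalive(&handle, 1, 60);  /* enable, 60 second idle delay */
  r = uv_tcp_keepalive(&handle, 0, 0);   /* disable; the delay is ignored */
  r = uv_tcp_keepalive(&handle, 1, 0);   /* enable with zero delay: rejected */
  printf("last call returned %d (UV_EINVAL is %d)\n", r, UV_EINVAL);

  uv_close((uv_handle_t*) &handle, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  return 0;
}
```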
+ */ + +#include "uv.h" +#include "task.h" + +#include +#include +#include + +#if !defined(__linux__) && !defined(__FreeBSD__) && \ + !defined(__DragonFly__) && !defined(__sun) && !defined(_AIX73) + +TEST_IMPL(tcp_reuseport) { + struct sockaddr_in addr; + uv_loop_t* loop; + uv_tcp_t handle; + int r; + + ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + loop = uv_default_loop(); + ASSERT_NOT_NULL(loop); + + r = uv_tcp_init(loop, &handle); + ASSERT_OK(r); + + r = uv_tcp_bind(&handle, (const struct sockaddr*) &addr, UV_TCP_REUSEPORT); + ASSERT_EQ(r, UV_ENOTSUP); + + MAKE_VALGRIND_HAPPY(loop); + + return 0; +} + +#else + +#define NUM_LISTENING_THREADS 2 +#define MAX_TCP_CLIENTS 10 + +static uv_tcp_t tcp_connect_handles[MAX_TCP_CLIENTS]; +static uv_connect_t tcp_connect_requests[MAX_TCP_CLIENTS]; + +static uv_sem_t semaphore; + +static uv_mutex_t mutex; +static unsigned int accepted; + +static unsigned int thread_loop1_accepted; +static unsigned int thread_loop2_accepted; +static unsigned int connected; + +static uv_loop_t* main_loop; +static uv_loop_t thread_loop1; +static uv_loop_t thread_loop2; +static uv_tcp_t thread_handle1; +static uv_tcp_t thread_handle2; +static uv_timer_t thread_timer_handle1; +static uv_timer_t thread_timer_handle2; + +static void on_close(uv_handle_t* handle) { + free(handle); +} + +static void ticktack(uv_timer_t* timer) { + ASSERT(timer == &thread_timer_handle1 || timer == &thread_timer_handle2); + + int done = 0; + uv_mutex_lock(&mutex); + if (accepted == MAX_TCP_CLIENTS) { + done = 1; + } + uv_mutex_unlock(&mutex); + + if (done) { + uv_close((uv_handle_t*) timer, NULL); + if (timer->loop == &thread_loop1) + uv_close((uv_handle_t*) &thread_handle1, NULL); + if (timer->loop == &thread_loop2) + uv_close((uv_handle_t*) &thread_handle2, NULL); + } +} + +static void on_connection(uv_stream_t* server, int status) +{ + ASSERT_OK(status); + ASSERT(server == (uv_stream_t*) &thread_handle1 || \ + server == (uv_stream_t*) &thread_handle2); + + uv_tcp_t *client = malloc(sizeof(uv_tcp_t)); + ASSERT_OK(uv_tcp_init(server->loop, client)); + ASSERT_OK(uv_accept(server, (uv_stream_t*) client)); + uv_close((uv_handle_t*) client, on_close); + + if (server->loop == &thread_loop1) + thread_loop1_accepted++; + + if (server->loop == &thread_loop2) + thread_loop2_accepted++; + + uv_mutex_lock(&mutex); + accepted++; + uv_mutex_unlock(&mutex); +} + +static void on_connect(uv_connect_t* req, int status) { + ASSERT_OK(status); + ASSERT_NOT_NULL(req->handle); + ASSERT_PTR_EQ(req->handle->loop, main_loop); + + connected++; + uv_close((uv_handle_t*) req->handle, NULL); +} + +static void create_listener(uv_loop_t* loop, uv_tcp_t* handle) { + struct sockaddr_in addr; + int r; + + ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + r = uv_tcp_init(loop, handle); + ASSERT_OK(r); + + r = uv_tcp_bind(handle, (const struct sockaddr*) &addr, UV_TCP_REUSEPORT); + ASSERT_OK(r); + + r = uv_listen((uv_stream_t*) handle, 128, on_connection); + ASSERT_OK(r); +} + +static void run_event_loop(void* arg) { + int r; + uv_tcp_t* handle; + uv_timer_t* timer; + uv_loop_t* loop = (uv_loop_t*) arg; + ASSERT(loop == &thread_loop1 || loop == &thread_loop2); + + if (loop == &thread_loop1) { + handle = &thread_handle1; + timer = &thread_timer_handle1; + } else { + handle = &thread_handle2; + timer = &thread_timer_handle2; + } + + create_listener(loop, handle); + r = uv_timer_init(loop, timer); + ASSERT_OK(r); + r = uv_timer_start(timer, ticktack, 0, 10); + ASSERT_OK(r); + + /* Notify the main thread to 
start connecting. */ + uv_sem_post(&semaphore); + r = uv_run(loop, UV_RUN_DEFAULT); + ASSERT_OK(r); +} + +TEST_IMPL(tcp_reuseport) { + struct sockaddr_in addr; + int r; + int i; + + r = uv_mutex_init(&mutex); + ASSERT_OK(r); + + r = uv_sem_init(&semaphore, 0); + ASSERT_OK(r); + + main_loop = uv_default_loop(); + ASSERT_NOT_NULL(main_loop); + + /* Run event loops of listeners in separate threads. */ + uv_loop_init(&thread_loop1); + uv_loop_init(&thread_loop2); + uv_thread_t thread_loop_id1; + uv_thread_t thread_loop_id2; + uv_thread_create(&thread_loop_id1, run_event_loop, &thread_loop1); + uv_thread_create(&thread_loop_id2, run_event_loop, &thread_loop2); + + /* Wait until all threads to poll for accepting connections + * before we start to connect. Otherwise the incoming connections + * might not be distributed across all listening threads. */ + for (i = 0; i < NUM_LISTENING_THREADS; i++) + uv_sem_wait(&semaphore); + /* Now we know all threads are up and entering the uv_run(), + * but we still sleep a little bit just for dual fail-safe. */ + uv_sleep(100); + + /* Start connecting to the peers. */ + ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + for (i = 0; i < MAX_TCP_CLIENTS; i++) { + r = uv_tcp_init(main_loop, &tcp_connect_handles[i]); + ASSERT_OK(r); + r = uv_tcp_connect(&tcp_connect_requests[i], + &tcp_connect_handles[i], + (const struct sockaddr*) &addr, + on_connect); + ASSERT_OK(r); + } + + r = uv_run(main_loop, UV_RUN_DEFAULT); + ASSERT_OK(r); + + /* Wait for all threads to exit. */ + uv_thread_join(&thread_loop_id1); + uv_thread_join(&thread_loop_id2); + + /* Verify if each listener per event loop accepted connections + * and the amount of accepted connections matches the one of + * connected connections. + */ + ASSERT_EQ(accepted, MAX_TCP_CLIENTS); + ASSERT_EQ(connected, MAX_TCP_CLIENTS); + ASSERT_GT(thread_loop1_accepted, 0); + ASSERT_GT(thread_loop2_accepted, 0); + ASSERT_EQ(thread_loop1_accepted + thread_loop2_accepted, connected); + + /* Clean up. 
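The tcp_reuseport test drives two listeners on separate threads, each bound to the same address with `UV_TCP_REUSEPORT`, and verifies that the kernel spreads incoming connections across both. On the platforms the test enables, the flag maps to `SO_REUSEPORT`; the sketch below shows that underlying socket-level behaviour outside libuv. The port number is arbitrary, and the load-balancing semantics vary by platform.

```c
/* Sketch: two TCP listeners sharing one port via SO_REUSEPORT. On Linux
 * the kernel load-balances incoming connections across such listeners,
 * provided every socket sets the option before bind(). */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int make_listener(unsigned short port) {
  struct sockaddr_in addr;
  int fd, on = 1;

  fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd == -1)
    return -1;
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on)) == -1)
    return -1;

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  addr.sin_port = htons(port);
  if (bind(fd, (struct sockaddr*) &addr, sizeof(addr)) == -1)
    return -1;
  if (listen(fd, 128) == -1)
    return -1;
  return fd;
}

int main(void) {
  /* Both binds succeed because each socket enables SO_REUSEPORT first. */
  int a = make_listener(9123);  /* port chosen purely for illustration */
  int b = make_listener(9123);
  printf("listeners: %d %d\n", a, b);
  if (a != -1) close(a);
  if (b != -1) close(b);
  return 0;
}
```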
*/ + uv_mutex_destroy(&mutex); + + uv_sem_destroy(&semaphore); + + uv_loop_close(&thread_loop1); + uv_loop_close(&thread_loop2); + MAKE_VALGRIND_HAPPY(main_loop); + + return 0; +} + +#endif diff --git a/deps/uv/test/test-tmpdir.c b/deps/uv/test/test-tmpdir.c index a4e9ce950aaa12..c8fc8e06a8b6d1 100644 --- a/deps/uv/test/test-tmpdir.c +++ b/deps/uv/test/test-tmpdir.c @@ -76,6 +76,13 @@ TEST_IMPL(tmpdir) { size_t lenx = sizeof tmpdirx; r = uv_os_tmpdir(tmpdirx, &lenx); ASSERT_OK(r); + + /* Test empty environment variable */ + r = uv_os_setenv("TMP", ""); + ASSERT_EQ(r, 0); + len = sizeof tmpdir; + r = uv_os_tmpdir(tmpdir, &len); + ASSERT_EQ(r, UV_ENOENT); #endif return 0; diff --git a/deps/uv/test/test-udp-multicast-join.c b/deps/uv/test/test-udp-multicast-join.c index 7e8fbe39f08c3b..9e322dc579fc33 100644 --- a/deps/uv/test/test-udp-multicast-join.c +++ b/deps/uv/test/test-udp-multicast-join.c @@ -126,7 +126,7 @@ static void cl_recv_cb(uv_udp_t* handle, r = uv_udp_set_membership(&server, MULTICAST_ADDR, NULL, UV_LEAVE_GROUP); ASSERT_OK(r); -#if !defined(__OpenBSD__) && !defined(__NetBSD__) +#if !defined(__NetBSD__) r = uv_udp_set_source_membership(&server, MULTICAST_ADDR, NULL, source_addr, UV_JOIN_GROUP); ASSERT_OK(r); #endif diff --git a/deps/uv/test/test-udp-multicast-join6.c b/deps/uv/test/test-udp-multicast-join6.c index 11efb0a6f67d78..c6872e4283247d 100644 --- a/deps/uv/test/test-udp-multicast-join6.c +++ b/deps/uv/test/test-udp-multicast-join6.c @@ -192,6 +192,11 @@ TEST_IMPL(udp_multicast_join6) { ASSERT_OK(r); +#if defined(__ANDROID__) + /* It returns an ENOSYS error */ + RETURN_SKIP("Test does not currently work in ANDROID"); +#endif + /* TODO(gengjiawen): Fix test on QEMU. */ #if defined(__QEMU__) RETURN_SKIP("Test does not currently work in QEMU"); diff --git a/deps/uv/test/test-udp-recv-in-a-row.c b/deps/uv/test/test-udp-recv-in-a-row.c index 30745def0f602d..0d97e5bf529431 100644 --- a/deps/uv/test/test-udp-recv-in-a-row.c +++ b/deps/uv/test/test-udp-recv-in-a-row.c @@ -50,11 +50,16 @@ static void sv_recv_cb(uv_udp_t* handle, const uv_buf_t* rcvbuf, const struct sockaddr* addr, unsigned flags) { - if (++ recv_cnt < N) { - ASSERT_EQ(sizeof(send_data), nread); - } else { - ASSERT_OK(nread); - } + /* |nread| can be zero when the kernel drops an incoming datagram after + * marking the file descriptor as readable but before libuv has a chance + * to receive it. Libuv still invokes the uv_udp_recv_cb callback to give + * back the memory from the uv_alloc_cb callback. + * + * See https://github.com/libuv/libuv/issues/4219. + */ + recv_cnt++; + if (nread > 0) + ASSERT_EQ(nread, sizeof(send_data)); } static void check_cb(uv_check_t* handle) { @@ -63,9 +68,11 @@ static void check_cb(uv_check_t* handle) { /** * sv_recv_cb() is called with nread set to zero to indicate * there is no more udp packet in the kernel, so the actual - * recv_cnt is one larger than N. + * recv_cnt is up to one larger than N. UDP being what it is, + * packets can get dropped so don't assume an exact count. */ - ASSERT_EQ(N+1, recv_cnt); + ASSERT_GE(recv_cnt, 1); + ASSERT_LE(recv_cnt, N+1); check_cb_called = 1; /* we are done */ diff --git a/deps/uv/test/test-udp-reuseport.c b/deps/uv/test/test-udp-reuseport.c new file mode 100644 index 00000000000000..7d4db40806f628 --- /dev/null +++ b/deps/uv/test/test-udp-reuseport.c @@ -0,0 +1,287 @@ +/* Copyright libuv project contributors. All rights reserved. 
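The test-udp-recv-in-a-row change above codifies that `uv_udp_recv_cb` may fire with `nread == 0` and `addr == NULL` solely to hand back the buffer obtained from `uv_alloc_cb`, for example when the kernel drops a datagram between the readability notification and the actual receive. A defensive callback therefore looks roughly like the sketch below; `on_datagram()` is a hypothetical application handler, not a libuv API.

```c
/* Sketch: a uv_udp_recv_cb covering every case described above. The
 * on_datagram() helper is hypothetical application code. */
#include <stdlib.h>
#include <uv.h>

static void on_datagram(const char* data, ssize_t len) {
  (void) data;  /* application-specific processing would go here */
  (void) len;
}

static void alloc_cb(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  (void) handle;
  *buf = uv_buf_init(malloc(suggested), (unsigned int) suggested);
}

static void recv_cb(uv_udp_t* handle,
                    ssize_t nread,
                    const uv_buf_t* buf,
                    const struct sockaddr* addr,
                    unsigned flags) {
  if (nread < 0) {
    uv_udp_recv_stop(handle);       /* transmission error */
  } else if (addr == NULL) {
    /* nread == 0 with no peer address: nothing was read; the callback
     * only returns the alloc_cb buffer, the case the test now tolerates. */
  } else if (!(flags & UV_UDP_PARTIAL)) {
    on_datagram(buf->base, nread);  /* complete (possibly empty) datagram */
  }
  free(buf->base);                  /* free(NULL) is a harmless no-op */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_udp_t handle;

  uv_udp_init(loop, &handle);
  uv_udp_recv_start(&handle, alloc_cb, recv_cb);  /* implicit 0.0.0.0 bind */
  uv_udp_recv_stop(&handle);
  uv_close((uv_handle_t*) &handle, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  return 0;
}
```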
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "task.h" + +#include +#include +#include + +#if !defined(__linux__) && !defined(__FreeBSD__) && \ + !defined(__DragonFly__) && !defined(__sun) && !defined(_AIX73) + +TEST_IMPL(udp_reuseport) { + struct sockaddr_in addr1, addr2, addr3; + uv_loop_t* loop; + uv_udp_t handle1, handle2, handle3; + int r; + + ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr1)); + ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT_2, &addr2)); + ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT_3, &addr3)); + + loop = uv_default_loop(); + ASSERT_NOT_NULL(loop); + + r = uv_udp_init(loop, &handle1); + ASSERT_OK(r); + + r = uv_udp_bind(&handle1, (const struct sockaddr*) &addr1, UV_UDP_REUSEADDR); + ASSERT_OK(r); + + r = uv_udp_init(loop, &handle2); + ASSERT_OK(r); + + r = uv_udp_bind(&handle2, (const struct sockaddr*) &addr2, UV_UDP_REUSEPORT); + ASSERT_EQ(r, UV_ENOTSUP); + + r = uv_udp_init(loop, &handle3); + ASSERT_OK(r); + + /* For platforms where SO_REUSEPORTs don't have the capability of + * load balancing, specifying both UV_UDP_REUSEADDR and UV_UDP_REUSEPORT + * in flags will fail, returning an UV_ENOTSUP error. 
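As the comments in the new udp_reuseport test note, `UV_UDP_REUSEPORT` is only accepted where `SO_REUSEPORT` actually load-balances, and unsupported platforms get `UV_ENOTSUP`. A portable caller can probe for that error and fall back to a plain bind, as in this hedged sketch; the port is arbitrary, and re-binding the same handle after the failed attempt is an assumption here (a freshly initialized handle can be used instead).

```c
/* Sketch: request kernel load balancing when available, otherwise fall
 * back to an ordinary bind. UV_ENOTSUP is what uv_udp_bind() reports on
 * platforms where UV_UDP_REUSEPORT is not supported. */
#include <uv.h>

static int bind_udp_with_optional_reuseport(uv_udp_t* handle,
                                            const struct sockaddr* addr) {
  int r;

  r = uv_udp_bind(handle, addr, UV_UDP_REUSEPORT);
  if (r == UV_ENOTSUP)
    r = uv_udp_bind(handle, addr, 0);  /* no load balancing on this platform */
  return r;
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  struct sockaddr_in addr;
  uv_udp_t handle;
  int r;

  uv_ip4_addr("127.0.0.1", 9124, &addr);  /* port chosen for illustration */
  uv_udp_init(loop, &handle);
  r = bind_udp_with_optional_reuseport(&handle, (const struct sockaddr*) &addr);

  uv_close((uv_handle_t*) &handle, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  uv_loop_close(loop);
  return (r == 0) ? 0 : 1;
}
```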
*/ + r = uv_udp_bind(&handle3, (const struct sockaddr*) &addr3, + UV_UDP_REUSEADDR | UV_UDP_REUSEPORT); + ASSERT_EQ(r, UV_ENOTSUP); + + MAKE_VALGRIND_HAPPY(loop); + + return 0; +} + +#else + +#define NUM_RECEIVING_THREADS 2 +#define MAX_UDP_DATAGRAMS 10 + +static uv_udp_t udp_send_handles[MAX_UDP_DATAGRAMS]; +static uv_udp_send_t udp_send_requests[MAX_UDP_DATAGRAMS]; + +static uv_sem_t semaphore; + +static uv_mutex_t mutex; +static unsigned int received; + +static unsigned int thread_loop1_recv; +static unsigned int thread_loop2_recv; +static unsigned int sent; + +static uv_loop_t* main_loop; +static uv_loop_t thread_loop1; +static uv_loop_t thread_loop2; +static uv_udp_t thread_handle1; +static uv_udp_t thread_handle2; +static uv_timer_t thread_timer_handle1; +static uv_timer_t thread_timer_handle2; + +static void alloc_cb(uv_handle_t* handle, + size_t suggested_size, + uv_buf_t* buf) { + buf->base = malloc(suggested_size); + buf->len = (int) suggested_size; +} + +static void ticktack(uv_timer_t* timer) { + int done = 0; + + ASSERT(timer == &thread_timer_handle1 || timer == &thread_timer_handle2); + + uv_mutex_lock(&mutex); + if (received == MAX_UDP_DATAGRAMS) { + done = 1; + } + uv_mutex_unlock(&mutex); + + if (done) { + uv_close((uv_handle_t*) timer, NULL); + if (timer->loop == &thread_loop1) + uv_close((uv_handle_t*) &thread_handle1, NULL); + if (timer->loop == &thread_loop2) + uv_close((uv_handle_t*) &thread_handle2, NULL); + } +} + +static void on_recv(uv_udp_t* handle, + ssize_t nr, + const uv_buf_t* buf, + const struct sockaddr* addr, + unsigned flags) { + ASSERT_OK(flags); + ASSERT(handle == &thread_handle1 || handle == &thread_handle2); + + ASSERT_GE(nr, 0); + + if (nr == 0) { + ASSERT_NULL(addr); + free(buf->base); + return; + } + + ASSERT_NOT_NULL(addr); + ASSERT_EQ(5, nr); + ASSERT(!memcmp("Hello", buf->base, nr)); + free(buf->base); + + if (handle->loop == &thread_loop1) + thread_loop1_recv++; + + if (handle->loop == &thread_loop2) + thread_loop2_recv++; + + uv_mutex_lock(&mutex); + received++; + uv_mutex_unlock(&mutex); +} + +static void on_send(uv_udp_send_t* req, int status) { + ASSERT_OK(status); + ASSERT_PTR_EQ(req->handle->loop, main_loop); + + if (++sent == MAX_UDP_DATAGRAMS) + uv_close((uv_handle_t*) req->handle, NULL); +} + +static void bind_socket_and_prepare_recv(uv_loop_t* loop, uv_udp_t* handle) { + struct sockaddr_in addr; + int r; + + ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + r = uv_udp_init(loop, handle); + ASSERT_OK(r); + + /* For platforms where SO_REUSEPORTs have the capability of + * load balancing, specifying both UV_UDP_REUSEADDR and + * UV_UDP_REUSEPORT in flags is allowed and SO_REUSEPORT will + * always override the behavior of SO_REUSEADDR. */ + r = uv_udp_bind(handle, (const struct sockaddr*) &addr, + UV_UDP_REUSEADDR | UV_UDP_REUSEPORT); + ASSERT_OK(r); + + r = uv_udp_recv_start(handle, alloc_cb, on_recv); + ASSERT_OK(r); +} + +static void run_event_loop(void* arg) { + int r; + uv_udp_t* handle; + uv_timer_t* timer; + uv_loop_t* loop = (uv_loop_t*) arg; + ASSERT(loop == &thread_loop1 || loop == &thread_loop2); + + if (loop == &thread_loop1) { + handle = &thread_handle1; + timer = &thread_timer_handle1; + } else { + handle = &thread_handle2; + timer = &thread_timer_handle2; + } + + bind_socket_and_prepare_recv(loop, handle); + r = uv_timer_init(loop, timer); + ASSERT_OK(r); + r = uv_timer_start(timer, ticktack, 0, 10); + ASSERT_OK(r); + + /* Notify the main thread to start sending data. 
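Both reuseport tests share the same startup choreography: each worker thread posts a semaphore once its socket is listening or receiving, and the main thread waits for every post before generating traffic, so the kernel really has multiple sockets to balance across. Condensed into a standalone sketch, with the worker body reduced to comments:

```c
/* Sketch: have worker threads signal "I am ready" before the main thread
 * proceeds, using uv_sem_t and uv_thread_t as in the reuseport tests. */
#include <stdio.h>
#include <uv.h>

#define NUM_WORKERS 2

static uv_sem_t ready;

static void worker(void* arg) {
  (void) arg;
  /* ... set up a listening/receiving socket on this thread's loop ... */
  uv_sem_post(&ready);              /* tell main we are accepting work */
  /* ... run this thread's event loop here ... */
}

int main(void) {
  uv_thread_t tids[NUM_WORKERS];
  int i;

  if (uv_sem_init(&ready, 0))
    return 1;

  for (i = 0; i < NUM_WORKERS; i++)
    if (uv_thread_create(&tids[i], worker, NULL))
      return 1;

  for (i = 0; i < NUM_WORKERS; i++)
    uv_sem_wait(&ready);            /* block until every worker has posted */

  printf("all %d workers ready; safe to start sending\n", NUM_WORKERS);

  for (i = 0; i < NUM_WORKERS; i++)
    uv_thread_join(&tids[i]);

  uv_sem_destroy(&ready);
  return 0;
}
```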
*/ + uv_sem_post(&semaphore); + r = uv_run(loop, UV_RUN_DEFAULT); + ASSERT_OK(r); +} + +TEST_IMPL(udp_reuseport) { + struct sockaddr_in addr; + uv_buf_t buf; + int r; + int i; + + r = uv_mutex_init(&mutex); + ASSERT_OK(r); + + r = uv_sem_init(&semaphore, 0); + ASSERT_OK(r); + + main_loop = uv_default_loop(); + ASSERT_NOT_NULL(main_loop); + + /* Run event loops of receiving sockets in separate threads. */ + uv_loop_init(&thread_loop1); + uv_loop_init(&thread_loop2); + uv_thread_t thread_loop_id1; + uv_thread_t thread_loop_id2; + uv_thread_create(&thread_loop_id1, run_event_loop, &thread_loop1); + uv_thread_create(&thread_loop_id2, run_event_loop, &thread_loop2); + + /* Wait until all threads to poll for receiving datagrams + * before we start to sending. Otherwise the incoming datagrams + * might not be distributed across all receiving threads. */ + for (i = 0; i < NUM_RECEIVING_THREADS; i++) + uv_sem_wait(&semaphore); + /* Now we know all threads are up and entering the uv_run(), + * but we still sleep a little bit just for dual fail-safe. */ + uv_sleep(100); + + /* Start sending datagrams to the peers. */ + buf = uv_buf_init("Hello", 5); + ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + for (i = 0; i < MAX_UDP_DATAGRAMS; i++) { + r = uv_udp_init(main_loop, &udp_send_handles[i]); + ASSERT_OK(r); + r = uv_udp_send(&udp_send_requests[i], + &udp_send_handles[i], + &buf, + 1, + (const struct sockaddr*) &addr, + on_send); + ASSERT_OK(r); + } + + r = uv_run(main_loop, UV_RUN_DEFAULT); + ASSERT_OK(r); + + /* Wait for all threads to exit. */ + uv_thread_join(&thread_loop_id1); + uv_thread_join(&thread_loop_id2); + + /* Verify if each receiving socket per event loop received datagrams + * and the amount of received datagrams matches the one of sent datagrams. + */ + ASSERT_EQ(received, MAX_UDP_DATAGRAMS); + ASSERT_EQ(sent, MAX_UDP_DATAGRAMS); + ASSERT_GT(thread_loop1_recv, 0); + ASSERT_GT(thread_loop2_recv, 0); + ASSERT_EQ(thread_loop1_recv + thread_loop2_recv, sent); + + /* Clean up. */ + uv_mutex_destroy(&mutex); + + uv_sem_destroy(&semaphore); + + uv_loop_close(&thread_loop1); + uv_loop_close(&thread_loop2); + MAKE_VALGRIND_HAPPY(main_loop); + + return 0; +} + +#endif diff --git a/deps/uv/uv.gyp b/deps/uv/uv.gyp index 6c86c3fa50496d..fea86b4af7826e 100644 --- a/deps/uv/uv.gyp +++ b/deps/uv/uv.gyp @@ -124,6 +124,7 @@ 'uv_sources_apple': [ 'src/unix/darwin.c', 'src/unix/fsevents.c', + 'src/unix/darwin-syscalls.h', 'src/unix/darwin-proctitle.c', 'src/unix/random-getentropy.c', ], diff --git a/doc/api/cli.md b/doc/api/cli.md index f37684d21f517c..1a9d61138e23f1 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -3467,23 +3467,6 @@ threadpool by setting the `'UV_THREADPOOL_SIZE'` environment variable to a value greater than `4` (its current default value). For more information, see the [libuv threadpool documentation][]. -### `UV_USE_IO_URING=value` - -Enable or disable libuv's use of `io_uring` on supported platforms. - -On supported platforms, `io_uring` can significantly improve the performance of -various asynchronous I/O operations. - -`io_uring` is disabled by default due to security concerns. When `io_uring` -is enabled, applications must not change the user identity of the process at -runtime. In this case, JavaScript functions such as [`process.setuid()`][] are -unavailable, and native addons must not invoke system functions such as -[`setuid(2)`][]. - -This environment variable is implemented by a dependency of Node.js and may be -removed in future versions of Node.js. 
No stability guarantees are provided for -the behavior of this environment variable. - ## Useful V8 options V8 has its own set of CLI options. Any V8 CLI option that is provided to `node` @@ -3644,8 +3627,6 @@ node --stack-trace-limit=12 -p -e "Error.stackTraceLimit" # prints 12 [`net.getDefaultAutoSelectFamilyAttemptTimeout()`]: net.md#netgetdefaultautoselectfamilyattempttimeout [`node:sqlite`]: sqlite.md [`process.setUncaughtExceptionCaptureCallback()`]: process.md#processsetuncaughtexceptioncapturecallbackfn -[`process.setuid()`]: process.md#processsetuidid -[`setuid(2)`]: https://man7.org/linux/man-pages/man2/setuid.2.html [`tls.DEFAULT_MAX_VERSION`]: tls.md#tlsdefault_max_version [`tls.DEFAULT_MIN_VERSION`]: tls.md#tlsdefault_min_version [`unhandledRejection`]: process.md#event-unhandledrejection diff --git a/src/node_credentials.cc b/src/node_credentials.cc index 65fdd145167139..2a7f2e878bc953 100644 --- a/src/node_credentials.cc +++ b/src/node_credentials.cc @@ -228,31 +228,13 @@ static gid_t gid_by_name(Isolate* isolate, Local value) { } } -#ifdef __linux__ -extern "C" { -int uv__node_patch_is_using_io_uring(void); - -int uv__node_patch_is_using_io_uring(void) __attribute__((weak)); - -typedef int (*is_using_io_uring_fn)(void); -} -#endif // __linux__ - static bool UvMightBeUsingIoUring() { #ifdef __linux__ - // Support for io_uring is only included in libuv 1.45.0 and later, and only - // on Linux (and Android, but there it is always disabled). The patch that we - // apply to libuv to work around the io_uring security issue adds a function - // that tells us whether io_uring is being used. If that function is not - // present, we assume that we are dynamically linking against an unpatched - // version. - static std::atomic check = - uv__node_patch_is_using_io_uring; - if (check == nullptr) { - check = reinterpret_cast( - dlsym(RTLD_DEFAULT, "uv__node_patch_is_using_io_uring")); - } - return uv_version() >= 0x012d00u && (check == nullptr || (*check)()); + // Support for io_uring is only included in libuv 1.45.0 and later. Starting + // with 1.49.0 is disabled by default. Check the version in case Node.js is + // dynamically to an io_uring-enabled version of libuv. 
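The rewritten `UvMightBeUsingIoUring()` relies only on `uv_version()`, which packs the runtime version as `(major << 16) | (minor << 8) | patch`; `0x012d00` is therefore 1.45.0 (the first release with io_uring support) and `0x013100` is 1.49.0 (where io_uring is disabled by default). A small sketch that decodes the packed value the same way:

```c
/* Sketch: decode libuv's packed runtime version, as used by the
 * UvMightBeUsingIoUring() check above (0x012d00 == 1.45.0,
 * 0x013100 == 1.49.0). */
#include <stdio.h>
#include <uv.h>

int main(void) {
  unsigned int v = uv_version();
  unsigned int major = (v >> 16) & 0xff;
  unsigned int minor = (v >> 8) & 0xff;
  unsigned int patch = v & 0xff;

  printf("running against libuv %u.%u.%u (%s)\n",
         major, minor, patch, uv_version_string());
  printf("io_uring may be in play: %s\n",
         (v >= 0x012d00u && v < 0x013100u) ? "yes" : "no");
  return 0;
}
```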
+ unsigned int version = uv_version(); + return version >= 0x012d00u && version < 0x013100u; #else return false; #endif diff --git a/test/parallel/test-process-setuid-io-uring.js b/test/parallel/test-process-setuid-io-uring.js deleted file mode 100644 index 93193ac2f8ab99..00000000000000 --- a/test/parallel/test-process-setuid-io-uring.js +++ /dev/null @@ -1,43 +0,0 @@ -'use strict'; -const common = require('../common'); - -const assert = require('node:assert'); -const { execFileSync } = require('node:child_process'); - -if (!common.isLinux) { - common.skip('test is Linux specific'); -} - -if (process.arch !== 'x64' && process.arch !== 'arm64') { - common.skip('io_uring support on this architecture is uncertain'); -} - -const kv = /^(\d+)\.(\d+)\.(\d+)/.exec(execFileSync('uname', ['-r'])).slice(1).map((n) => parseInt(n, 10)); -if (((kv[0] << 16) | (kv[1] << 8) | kv[2]) < 0x050ABA) { - common.skip('io_uring is likely buggy due to old kernel'); -} - -const userIdentitySetters = [ - ['setuid', [1000]], - ['seteuid', [1000]], - ['setgid', [1000]], - ['setegid', [1000]], - ['setgroups', [[1000]]], - ['initgroups', ['nodeuser', 1000]], -]; - -for (const [fnName, args] of userIdentitySetters) { - const call = `process.${fnName}(${args.map((a) => JSON.stringify(a)).join(', ')})`; - const code = `try { ${call}; } catch (err) { console.log(err); }`; - - const stdout = execFileSync(process.execPath, ['-e', code], { - encoding: 'utf8', - env: { ...process.env, UV_USE_IO_URING: '1' }, - }); - - const msg = new RegExp(`^Error: ${fnName}\\(\\) disabled: io_uring may be enabled\\. See CVE-[X0-9]{4}-`); - assert.match(stdout, msg); - assert.match(stdout, /code: 'ERR_INVALID_STATE'/); - - console.log(call, stdout.slice(0, stdout.indexOf('\n'))); -}
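The deleted regression test skipped itself on kernels older than 5.10.186, packing the parsed `uname -r` components as `(major << 16) | (minor << 8) | patch` and comparing against `0x050ABA`. For reference, an equivalent POSIX-only check in C via `uname(2)`; like the original, it assumes each version component fits in one byte.

```c
/* Sketch: reproduce the removed test's kernel-version gate in C.
 * 0x050ABA packs 5.10.186, the same threshold the JS test used. */
#include <stdio.h>
#include <sys/utsname.h>

int main(void) {
  struct utsname u;
  int maj, min, patch;

  if (uname(&u) != 0)
    return 1;
  if (sscanf(u.release, "%d.%d.%d", &maj, &min, &patch) != 3)
    return 1;

  /* Same packing as the deleted test: (major << 16) | (minor << 8) | patch. */
  if (((maj << 16) | (min << 8) | patch) < 0x050ABA)
    printf("old kernel %s: io_uring likely buggy, skip\n", u.release);
  else
    printf("kernel %s: new enough for io_uring\n", u.release);
  return 0;
}
```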