Commit d221b203 authored by Jason Madden, committed by GitHub

Merge pull request #1284 from gevent/libuv-1.23.2

Update to libuv 1.23.2
parents 85d0dd3d ac1f6eed
......@@ -11,6 +11,7 @@
objects that have no value in the greenlet. See :issue:`1275`.
- Fixed negative length in pywsgi's Input read functions for non chunked body.
Reported in :issue:`1274` by tzickel.
- Upgrade libuv from 1.22.0 to 1.23.2.
1.3.6 (2018-08-17)
==================
......
......@@ -18,3 +18,4 @@ Updating libuv
- rm -rf libuv/samples
- rm -rf libuv/test
- rm -rf libuv/tools
- rm -f libuv/android-configure*
......@@ -345,3 +345,10 @@ Peter Johnson <johnson.peter@gmail.com>
Paolo Greppi <paolo.greppi@libpf.com>
Shelley Vohr <shelley.vohr@gmail.com>
Ujjwal Sharma <usharma1998@gmail.com>
Michał Kozakiewicz <michalkozakiewicz3@gmail.com>
Emil Bay <github@tixz.dk>
Jeremiah Senkpiel <fishrock123@rocketmail.com>
Andy Zhang <zhangyong232@gmail.com>
dmabupt <dmabupt@gmail.com>
Ryan Liptak <squeek502@hotmail.com>
Ali Ijaz Sheikh <ofrobots@google.com>
......@@ -98,6 +98,7 @@ set(uv_test_sources
test/test-poll-closesocket.c
test/test-poll-oob.c
test/test-poll.c
test/test-process-priority.c
test/test-process-title-threadsafe.c
test/test-process-title.c
test/test-queue-foreach-delete.c
......@@ -192,7 +193,6 @@ if(WIN32)
src/win/poll.c
src/win/process.c
src/win/process-stdio.c
src/win/req.c
src/win/signal.c
src/win/snprintf.c
src/win/stream.c
......
2018.07.11, Version 1.22.0 (Stable)
2018.10.09, Version 1.23.2 (Stable)
Changes since version 1.23.1:
* unix: return 0 retrieving rss on cygwin (cjihrig)
* unix: initialize uv_interface_address_t.phys_addr (cjihrig)
* test: handle uv_os_setpriority() windows edge case (cjihrig)
* tty, win: fix read stop for raw mode (Bartosz Sosnowski)
* Revert "Revert "unix,fs: fix for potential partial reads/writes"" (Jameson
Nash)
* unix,readv: always permit partial reads to return (Jameson Nash)
* win,tty: fix uv_tty_close() (Bartosz Sosnowski)
* doc: remove extraneous "on" (Ben Noordhuis)
* unix,win: fix threadpool race condition (Anna Henningsen)
* unix: rework thread barrier implementation (Ben Noordhuis)
* aix: switch to libuv's own thread barrier impl (Ben Noordhuis)
* unix: signal done to last thread barrier waiter (Ben Noordhuis)
* test: add uv_barrier_wait serial thread test (Ali Ijaz Sheikh)
* unix: optimize uv_fs_readlink() memory allocation (Ben Noordhuis)
* win: remove req.c and other cleanup (Carlo Marcelo Arenas Belón)
* aix: don't EISDIR on read from directory fd (Ben Noordhuis)
2018.09.22, Version 1.23.1 (Stable), d2282b3d67821dc53c907c2155fa8c5c6ce25180
Changes since version 1.23.0:
* unix,win: limit concurrent DNS calls to nthreads/2 (Anna Henningsen)
* doc: add addaleax to maintainers (Anna Henningsen)
* doc: add missing slash in stream.rst (Emil Bay)
* unix,fs: use utimes & friends for uv_fs_utime (Jeremiah Senkpiel)
* unix,fs: remove linux fallback from utimesat() (Jeremiah Senkpiel)
* unix,fs: remove uv__utimesat() syscall fallback (Jeremiah Senkpiel)
* doc: fix argument name in tcp.rts (Emil Bay)
* doc: notes on running tests, benchmarks, tools (Jamie Davis)
* linux: remove epoll syscall wrappers (Ben Noordhuis)
* linux: drop code path for epoll_pwait-less kernels (Ben Noordhuis)
* Partially revert "win,code: remove GetQueuedCompletionStatus-based poller"
(Jameson Nash)
* build: add compile for android arm64/x86/x86-64 (Andy Zhang)
* doc: clarify that some remarks apply to windows (Bert Belder)
* test: fix compiler warnings (Jamie Davis)
* ibmi: return 0 from uv_resident_set_memory() (dmabupt)
* win: fix uv_udp_recv_start() error translation (Ryan Liptak)
* win,doc: improve uv_os_setpriority() documentation (Bartosz Sosnowski)
* test: increase upper bound in condvar_5 (Jamie Davis)
* win,tty: remove deadcode (Jameson Nash)
* stream: autodetect direction (Jameson Nash)
2018.08.18, Version 1.23.0 (Stable), 7ebb26225f2eaae6db22f4ef34ce76fa16ff89ec
Changes since version 1.22.0:
* win,pipe: restore compatibility with the old IPC framing protocol (Bert
Belder)
* fs: add uv_open_osfhandle (Bartosz Sosnowski)
* doc: update Visual C++ Build Tools URL (Michał Kozakiewicz)
* unix: loop starvation on successful write complete (jBarz)
* win: add uv__getnameinfo_work() error handling (A. Hauptmann)
* win: return UV_ENOMEM from uv_loop_init() (cjihrig)
* unix,win: add uv_os_{get,set}priority() (cjihrig)
* test: fix warning in test-tcp-open (Santiago Gimeno)
2018.07.11, Version 1.22.0 (Stable), 8568f78a777d79d35eb7d6994617267b9fb33967
Changes since version 1.21.0:
......
......@@ -3,6 +3,7 @@
libuv is currently managed by the following individuals:
* **Anna Henningsen** ([@addaleax](https://github.com/addaleax))
* **Bartosz Sosnowski** ([@bzoz](https://github.com/bzoz))
* **Ben Noordhuis** ([@bnoordhuis](https://github.com/bnoordhuis))
- GPG key: D77B 1E34 243F BAF0 5F8E 9CC3 4F55 C8C8 46AB 89B9 (pubkey-bnoordhuis)
......
......@@ -68,7 +68,6 @@ libuv_la_SOURCES += src/win/async.c \
src/win/poll.c \
src/win/process-stdio.c \
src/win/process.c \
src/win/req.c \
src/win/req-inl.h \
src/win/signal.c \
src/win/stream.c \
......@@ -224,6 +223,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-poll-close-doesnt-corrupt-stack.c \
test/test-poll-closesocket.c \
test/test-poll-oob.c \
test/test-process-priority.c \
test/test-process-title.c \
test/test-process-title-threadsafe.c \
test/test-queue-foreach-delete.c \
......@@ -339,8 +339,7 @@ libuv_la_SOURCES += src/unix/aix.c src/unix/aix-common.c
endif
if ANDROID
uvinclude_HEADERS += include/uv/android-ifaddrs.h \
include/uv/pthread-barrier.h
uvinclude_HEADERS += include/uv/android-ifaddrs.h
libuv_la_SOURCES += src/unix/android-ifaddrs.c \
src/unix/pthread-fixes.c
endif
......@@ -360,8 +359,7 @@ libuv_la_SOURCES += src/unix/cygwin.c \
endif
if DARWIN
uvinclude_HEADERS += include/uv/darwin.h \
include/uv/pthread-barrier.h
uvinclude_HEADERS += include/uv/darwin.h
libuv_la_CFLAGS += -D_DARWIN_USE_64_BIT_INODE=1
libuv_la_CFLAGS += -D_DARWIN_UNLIMITED_SELECT=1
libuv_la_SOURCES += src/unix/bsd-ifaddrs.c \
......@@ -444,7 +442,6 @@ libuv_la_SOURCES += src/unix/no-proctitle.c \
endif
if OS390
uvinclude_HEADERS += include/uv/pthread-barrier.h
libuv_la_CFLAGS += -D_UNIX03_THREADS \
-D_UNIX03_SOURCE \
-D_OPEN_SYS_IF_EXT=1 \
......
......@@ -282,8 +282,31 @@ Make sure that you specify the architecture you wish to build for in the
Run:
For arm
```bash
$ source ./android-configure-arm NDK_PATH gyp [API_LEVEL]
$ make -C out
```
or for arm64
```bash
$ source ./android-configure-arm64 NDK_PATH gyp [API_LEVEL]
$ make -C out
```
or for x86
```bash
$ source ./android-configure-x86 NDK_PATH gyp [API_LEVEL]
$ make -C out
```
or for x86_64
```bash
$ source ./android-configure NDK_PATH gyp [API_LEVEL]
$ source ./android-configure-x86_64 NDK_PATH gyp [API_LEVEL]
$ make -C out
```
......@@ -310,14 +333,66 @@ $ ninja -C out/Release
### Running tests
Run:
#### Build
Build (includes tests):
```bash
$ ./gyp_uv.py -f make
$ make -C out
```
#### Run all tests
```bash
$ ./out/Debug/run-tests
```
#### Run one test
The list of all tests is in `test/test-list.h`.
This invocation will cause the `run-tests` driver to fork and execute `TEST_NAME` in a child process:
```bash
$ ./out/Debug/run-tests TEST_NAME
```
This invocation will cause the `run-tests` driver to execute the test within the `run-tests` process:
```bash
$ ./out/Debug/run-tests TEST_NAME TEST_NAME
```
#### Debugging tools
When running the test from within the `run-tests` process (`run-tests TEST_NAME TEST_NAME`), tools like gdb and valgrind work normally.
When running the test from a child of the `run-tests` process (`run-tests TEST_NAME`), use these tools in a fork-aware manner.
##### Fork-aware gdb
Use the [follow-fork-mode](https://sourceware.org/gdb/onlinedocs/gdb/Forks.html) setting:
```
$ gdb --args out/Debug/run-tests TEST_NAME
(gdb) set follow-fork-mode child
...
```
##### Fork-aware valgrind
Use the `--trace-children=yes` parameter:
```bash
$ valgrind --trace-children=yes -v --tool=memcheck --leak-check=full --track-origins=yes --leak-resolution=high --show-reachable=yes --log-file=memcheck.log out/Debug/run-tests TEST_NAME
```
### Running benchmarks
See the section on running tests.
The benchmark driver is `out/Debug/run-benchmarks` and the benchmarks are listed in `test/benchmark-list.h`.
## Supported Platforms
Check the [SUPPORTED_PLATFORMS file](SUPPORTED_PLATFORMS.md).
......@@ -349,7 +424,7 @@ See the [guidelines for contributing][].
[libuv_banner]: https://raw.githubusercontent.com/libuv/libuv/master/img/banner.png
[x32]: https://en.wikipedia.org/wiki/X32_ABI
[Python 2.6 or 2.7]: https://www.python.org/downloads/
[Visual C++ Build Tools]: http://landinghub.visualstudio.com/visual-cpp-build-tools
[Visual C++ Build Tools]: https://visualstudio.microsoft.com/visual-cpp-build-tools/
[Visual Studio 2015 Update 3]: https://www.visualstudio.com/vs/older-downloads/
[Visual Studio 2017]: https://www.visualstudio.com/downloads/
[Git for Windows]: http://git-scm.com/download/win
#!/bin/bash
export TOOLCHAIN=$PWD/android-toolchain
mkdir -p $TOOLCHAIN
API=${3:-24}
$1/build/tools/make-standalone-toolchain.sh \
--toolchain=arm-linux-androideabi-4.9 \
--arch=arm \
--install-dir=$TOOLCHAIN \
--platform=android-$API \
--force
export PATH=$TOOLCHAIN/bin:$PATH
export AR=arm-linux-androideabi-ar
export CC=arm-linux-androideabi-gcc
export CXX=arm-linux-androideabi-g++
export LINK=arm-linux-androideabi-g++
export PLATFORM=android
export CFLAGS="-D__ANDROID_API__=$API"
if [[ $2 == 'gyp' ]]
then
./gyp_uv.py -Dtarget_arch=arm -DOS=android -f make-android
fi
......@@ -13,7 +13,7 @@
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libuv], [1.22.0], [https://github.com/libuv/libuv/issues])
AC_INIT([libuv], [1.23.2], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])
......
......@@ -1065,6 +1065,7 @@ UV_EXTERN int uv_set_process_title(const char* title);
UV_EXTERN int uv_resident_set_memory(size_t* rss);
UV_EXTERN int uv_uptime(double* uptime);
UV_EXTERN uv_os_fd_t uv_get_osfhandle(int fd);
UV_EXTERN int uv_open_osfhandle(uv_os_fd_t os_fd);
typedef struct {
long tv_sec;
......@@ -1099,6 +1100,16 @@ UV_EXTERN void uv_os_free_passwd(uv_passwd_t* pwd);
UV_EXTERN uv_pid_t uv_os_getpid(void);
UV_EXTERN uv_pid_t uv_os_getppid(void);
#define UV_PRIORITY_LOW 19
#define UV_PRIORITY_BELOW_NORMAL 10
#define UV_PRIORITY_NORMAL 0
#define UV_PRIORITY_ABOVE_NORMAL -7
#define UV_PRIORITY_HIGH -14
#define UV_PRIORITY_HIGHEST -20
UV_EXTERN int uv_os_getpriority(uv_pid_t pid, int* priority);
UV_EXTERN int uv_os_setpriority(uv_pid_t pid, int priority);
UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count);
UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count);
......
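The 1.23 series adds a cross-platform process-priority API: the `UV_PRIORITY_*` constants above map onto Unix nice values, and `uv_os_getpriority()` / `uv_os_setpriority()` read and change the priority of a process by pid, with a pid of 0 meaning the calling process. The following is a minimal usage sketch, not part of the diff; it assumes only the declarations shown above plus the existing `uv_os_getpid()` and `uv_strerror()` calls.

```c
#include <stdio.h>
#include <uv.h>

/* Hedged usage sketch (not part of the diff): lower the current process to
 * BELOW_NORMAL priority and read the value back. */
int main(void) {
  uv_pid_t pid = uv_os_getpid();
  int priority;
  int r;

  r = uv_os_setpriority(pid, UV_PRIORITY_BELOW_NORMAL);
  if (r != 0) {
    fprintf(stderr, "uv_os_setpriority: %s\n", uv_strerror(r));
    return 1;
  }

  r = uv_os_getpriority(pid, &priority);
  if (r != 0) {
    fprintf(stderr, "uv_os_getpriority: %s\n", uv_strerror(r));
    return 1;
  }

  /* Prints 10 on Unix-like systems (the nice value just set). */
  printf("priority is now %d\n", priority);
  return 0;
}
```

Lowering priority normally succeeds for unprivileged processes; raising it above normal generally requires elevated privileges on Unix.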
/*
Copyright (c) 2016, Kari Tristan Helgason <kthelgason@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _UV_PTHREAD_BARRIER_
#define _UV_PTHREAD_BARRIER_
#include <errno.h>
#include <pthread.h>
#if !defined(__MVS__)
#include <semaphore.h> /* sem_t */
#endif
#define PTHREAD_BARRIER_SERIAL_THREAD 0x12345
#define UV__PTHREAD_BARRIER_FALLBACK 1
/*
* To maintain ABI compatibility with
* libuv v1.x struct is padded according
* to target platform
*/
#if defined(__ANDROID__)
# define UV_BARRIER_STRUCT_PADDING \
sizeof(pthread_mutex_t) + \
sizeof(pthread_cond_t) + \
sizeof(unsigned int) - \
sizeof(void *)
#elif defined(__APPLE__)
# define UV_BARRIER_STRUCT_PADDING \
sizeof(pthread_mutex_t) + \
2 * sizeof(sem_t) + \
2 * sizeof(unsigned int) - \
sizeof(void *)
#else
# define UV_BARRIER_STRUCT_PADDING 0
#endif
typedef struct {
pthread_mutex_t mutex;
pthread_cond_t cond;
unsigned threshold;
unsigned in;
unsigned out;
} _uv_barrier;
typedef struct {
_uv_barrier* b;
char _pad[UV_BARRIER_STRUCT_PADDING];
} pthread_barrier_t;
int pthread_barrier_init(pthread_barrier_t* barrier,
const void* barrier_attr,
unsigned count);
int pthread_barrier_wait(pthread_barrier_t* barrier);
int pthread_barrier_destroy(pthread_barrier_t *barrier);
#endif /* _UV_PTHREAD_BARRIER_ */
......@@ -66,10 +66,6 @@
# include "uv/posix.h"
#endif
#ifndef PTHREAD_BARRIER_SERIAL_THREAD
# include "uv/pthread-barrier.h"
#endif
#ifndef NI_MAXHOST
# define NI_MAXHOST 1025
#endif
......@@ -136,8 +132,28 @@ typedef pthread_rwlock_t uv_rwlock_t;
typedef UV_PLATFORM_SEM_T uv_sem_t;
typedef pthread_cond_t uv_cond_t;
typedef pthread_key_t uv_key_t;
typedef pthread_barrier_t uv_barrier_t;
/* Note: guard clauses should match uv_barrier_init's in src/unix/thread.c. */
#if defined(_AIX) || !defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* TODO(bnoordhuis) Merge into uv_barrier_t in v2. */
struct _uv_barrier {
uv_mutex_t mutex;
uv_cond_t cond;
unsigned threshold;
unsigned in;
unsigned out;
};
typedef struct {
struct _uv_barrier* b;
# if defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* TODO(bnoordhuis) Remove padding in v2. */
char pad[sizeof(pthread_barrier_t) - sizeof(struct _uv_barrier*)];
# endif
} uv_barrier_t;
#else
typedef pthread_barrier_t uv_barrier_t;
#endif
/* Platform-specific definitions for uv_spawn support. */
typedef gid_t uv_gid_t;
......
......@@ -31,8 +31,8 @@
*/
#define UV_VERSION_MAJOR 1
#define UV_VERSION_MINOR 22
#define UV_VERSION_PATCH 0
#define UV_VERSION_MINOR 23
#define UV_VERSION_PATCH 2
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""
......
......@@ -33,12 +33,18 @@ static uv_once_t once = UV_ONCE_INIT;
static uv_cond_t cond;
static uv_mutex_t mutex;
static unsigned int idle_threads;
static unsigned int slow_io_work_running;
static unsigned int nthreads;
static uv_thread_t* threads;
static uv_thread_t default_threads[4];
static QUEUE exit_message;
static QUEUE wq;
static QUEUE run_slow_work_message;
static QUEUE slow_io_pending_wq;
static unsigned int slow_work_thread_threshold(void) {
return (nthreads + 1) / 2;
}
static void uv__cancelled(struct uv__work* w) {
abort();
......@@ -51,34 +57,67 @@ static void uv__cancelled(struct uv__work* w) {
static void worker(void* arg) {
struct uv__work* w;
QUEUE* q;
int is_slow_work;
uv_sem_post((uv_sem_t*) arg);
arg = NULL;
uv_mutex_lock(&mutex);
for (;;) {
uv_mutex_lock(&mutex);
while (QUEUE_EMPTY(&wq)) {
/* `mutex` should always be locked at this point. */
/* Keep waiting while either no work is present or only slow I/O
and we're at the threshold for that. */
while (QUEUE_EMPTY(&wq) ||
(QUEUE_HEAD(&wq) == &run_slow_work_message &&
QUEUE_NEXT(&run_slow_work_message) == &wq &&
slow_io_work_running >= slow_work_thread_threshold())) {
idle_threads += 1;
uv_cond_wait(&cond, &mutex);
idle_threads -= 1;
}
q = QUEUE_HEAD(&wq);
if (q == &exit_message)
if (q == &exit_message) {
uv_cond_signal(&cond);
else {
uv_mutex_unlock(&mutex);
break;
}
QUEUE_REMOVE(q);
QUEUE_INIT(q); /* Signal uv_cancel() that the work req is executing. */
is_slow_work = 0;
if (q == &run_slow_work_message) {
/* If we're at the slow I/O threshold, re-schedule until after all
other work in the queue is done. */
if (slow_io_work_running >= slow_work_thread_threshold()) {
QUEUE_INSERT_TAIL(&wq, q);
continue;
}
/* If we encountered a request to run slow I/O work but there is none
to run, that means it's cancelled => Start over. */
if (QUEUE_EMPTY(&slow_io_pending_wq))
continue;
is_slow_work = 1;
slow_io_work_running++;
q = QUEUE_HEAD(&slow_io_pending_wq);
QUEUE_REMOVE(q);
QUEUE_INIT(q); /* Signal uv_cancel() that the work req is
executing. */
QUEUE_INIT(q);
/* If there is more slow I/O work, schedule it to be run as well. */
if (!QUEUE_EMPTY(&slow_io_pending_wq)) {
QUEUE_INSERT_TAIL(&wq, &run_slow_work_message);
if (idle_threads > 0)
uv_cond_signal(&cond);
}
}
uv_mutex_unlock(&mutex);
if (q == &exit_message)
break;
w = QUEUE_DATA(q, struct uv__work, wq);
w->work(w);
......@@ -88,12 +127,32 @@ static void worker(void* arg) {
QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
uv_async_send(&w->loop->wq_async);
uv_mutex_unlock(&w->loop->wq_mutex);
/* Lock `mutex` since that is expected at the start of the next
* iteration. */
uv_mutex_lock(&mutex);
if (is_slow_work) {
/* `slow_io_work_running` is protected by `mutex`. */
slow_io_work_running--;
}
}
}
static void post(QUEUE* q) {
static void post(QUEUE* q, enum uv__work_kind kind) {
uv_mutex_lock(&mutex);
if (kind == UV__WORK_SLOW_IO) {
/* Insert into a separate queue. */
QUEUE_INSERT_TAIL(&slow_io_pending_wq, q);
if (!QUEUE_EMPTY(&run_slow_work_message)) {
/* Running slow I/O tasks is already scheduled => Nothing to do here.
The worker that runs said other task will schedule this one as well. */
uv_mutex_unlock(&mutex);
return;
}
q = &run_slow_work_message;
}
QUEUE_INSERT_TAIL(&wq, q);
if (idle_threads > 0)
uv_cond_signal(&cond);
......@@ -108,7 +167,7 @@ UV_DESTRUCTOR(static void cleanup(void)) {
if (nthreads == 0)
return;
post(&exit_message);
post(&exit_message, UV__WORK_CPU);
for (i = 0; i < nthreads; i++)
if (uv_thread_join(threads + i))
......@@ -156,6 +215,8 @@ static void init_threads(void) {
abort();
QUEUE_INIT(&wq);
QUEUE_INIT(&slow_io_pending_wq);
QUEUE_INIT(&run_slow_work_message);
if (uv_sem_init(&sem, 0))
abort();
......@@ -194,13 +255,14 @@ static void init_once(void) {
void uv__work_submit(uv_loop_t* loop,
struct uv__work* w,
enum uv__work_kind kind,
void (*work)(struct uv__work* w),
void (*done)(struct uv__work* w, int status)) {
uv_once(&once, init_once);
w->loop = loop;
w->work = work;
w->done = done;
post(&w->wq);
post(&w->wq, kind);
}
......@@ -284,7 +346,11 @@ int uv_queue_work(uv_loop_t* loop,
req->loop = loop;
req->work_cb = work_cb;
req->after_work_cb = after_work_cb;
uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done);
uv__work_submit(loop,
&req->work_req,
UV__WORK_CPU,
uv__queue_work,
uv__queue_done);
return 0;
}
......
......@@ -119,16 +119,13 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
continue;
address = *addresses;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
for (i = 0; i < *count; i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {
#if defined(__CYGWIN__) || defined(__MSYS__)
memset(address->phys_addr, 0, sizeof(address->phys_addr));
#else
struct sockaddr_dl* sa_addr;
sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
#endif
}
address++;
}
......
......@@ -1338,6 +1338,9 @@ uv_os_fd_t uv_get_osfhandle(int fd) {
return fd;
}
int uv_open_osfhandle(uv_os_fd_t os_fd) {
return os_fd;
}
uv_pid_t uv_os_getpid(void) {
return getpid();
......@@ -1347,3 +1350,31 @@ uv_pid_t uv_os_getpid(void) {
uv_pid_t uv_os_getppid(void) {
return getppid();
}
int uv_os_getpriority(uv_pid_t pid, int* priority) {
int r;
if (priority == NULL)
return UV_EINVAL;
errno = 0;
r = getpriority(PRIO_PROCESS, (int) pid);
if (r == -1 && errno != 0)
return UV__ERR(errno);
*priority = r;
return 0;
}
int uv_os_setpriority(uv_pid_t pid, int priority) {
if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
return UV_EINVAL;
if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
return UV__ERR(errno);
return 0;
}
......@@ -38,7 +38,7 @@ int uv_uptime(double* uptime) {
int uv_resident_set_memory(size_t* rss) {
/* FIXME: read /proc/meminfo? */
*rss = 0;
return UV_ENOSYS;
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
......
......@@ -186,6 +186,7 @@ int uv_getaddrinfo(uv_loop_t* loop,
if (cb) {
uv__work_submit(loop,
&req->work_req,
UV__WORK_SLOW_IO,
uv__getaddrinfo_work,
uv__getaddrinfo_done);
return 0;
......
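This hunk tags getaddrinfo work requests as `UV__WORK_SLOW_IO`, so DNS lookups can no longer monopolize the threadpool (roughly half the threads at most may run slow I/O at once). The change is invisible to callers; for context, here is a hedged sketch of an ordinary asynchronous lookup, with the host and port chosen purely for illustration.

```c
#include <stdio.h>
#include <string.h>
#include <uv.h>

static void on_resolved(uv_getaddrinfo_t* req, int status, struct addrinfo* res) {
  char addr[64];

  if (status < 0) {
    fprintf(stderr, "getaddrinfo: %s\n", uv_strerror(status));
    return;
  }

  /* hints below restrict results to IPv4, so the first entry is a sockaddr_in. */
  uv_ip4_name((struct sockaddr_in*) res->ai_addr, addr, sizeof(addr));
  printf("resolved to %s\n", addr);
  uv_freeaddrinfo(res);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_getaddrinfo_t req;
  struct addrinfo hints;

  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_INET;
  hints.ai_socktype = SOCK_STREAM;

  /* With a callback, the request runs on the threadpool; since this release
   * such lookups are classified as slow I/O and limited in concurrency. */
  uv_getaddrinfo(loop, &req, on_resolved, "example.org", "80", &hints);
  return uv_run(loop, UV_RUN_DEFAULT);
}
```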
......@@ -109,6 +109,7 @@ int uv_getnameinfo(uv_loop_t* loop,
if (getnameinfo_cb) {
uv__work_submit(loop,
&req->work_req,
UV__WORK_SLOW_IO,
uv__getnameinfo_work,
uv__getnameinfo_done);
return 0;
......
......@@ -72,7 +72,8 @@ void uv_loadavg(double avg[3]) {
int uv_resident_set_memory(size_t* rss) {
return UV_ENOSYS;
*rss = 0;
return 0;
}
......
......@@ -20,7 +20,7 @@
/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
* EPOLL* counterparts. We use the POLL* variants in this file because that
* is what libuv uses elsewhere and it avoids a dependency on <sys/epoll.h>.
* is what libuv uses elsewhere.
*/
#include "uv.h"
......@@ -34,6 +34,7 @@
#include <errno.h>
#include <net/if.h>
#include <sys/epoll.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/sysinfo.h>
......@@ -84,13 +85,13 @@ static unsigned long read_cpufreq(unsigned int cpunum);
int uv__platform_loop_init(uv_loop_t* loop) {
int fd;
fd = uv__epoll_create1(UV__EPOLL_CLOEXEC);
fd = epoll_create1(EPOLL_CLOEXEC);
/* epoll_create1() can fail either because it's not implemented (old kernel)
* or because it doesn't understand the EPOLL_CLOEXEC flag.
*/
if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
fd = uv__epoll_create(256);
fd = epoll_create(256);
if (fd != -1)
uv__cloexec(fd, 1);
......@@ -134,20 +135,20 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct uv__epoll_event* events;
struct uv__epoll_event dummy;
struct epoll_event* events;
struct epoll_event dummy;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
events = (struct uv__epoll_event*) loop->watchers[loop->nwatchers];
events = (struct epoll_event*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events != NULL)
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if ((int) events[i].data == fd)
events[i].data = -1;
if (events[i].data.fd == fd)
events[i].data.fd = -1;
/* Remove the file descriptor from the epoll.
* This avoids a problem where the same file description remains open
......@@ -160,25 +161,25 @@ void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
* has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
*/
memset(&dummy, 0, sizeof(dummy));
uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &dummy);
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
}
}
int uv__io_check_fd(uv_loop_t* loop, int fd) {
struct uv__epoll_event e;
struct epoll_event e;
int rc;
e.events = POLLIN;
e.data = -1;
e.data.fd = -1;
rc = 0;
if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_ADD, fd, &e))
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
if (errno != EEXIST)
rc = UV__ERR(errno);
if (rc == 0)
if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &e))
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
abort();
return rc;
......@@ -195,16 +196,14 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
* that being the largest value I have seen in the wild (and only once.)
*/
static const int max_safe_timeout = 1789569;
static int no_epoll_pwait;
static int no_epoll_wait;
struct uv__epoll_event events[1024];
struct uv__epoll_event* pe;
struct uv__epoll_event e;
struct epoll_event events[1024];
struct epoll_event* pe;
struct epoll_event e;
int real_timeout;
QUEUE* q;
uv__io_t* w;
sigset_t sigset;
uint64_t sigmask;
sigset_t* psigset;
uint64_t base;
int have_signals;
int nevents;
......@@ -230,35 +229,35 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
assert(w->fd < (int) loop->nwatchers);
e.events = w->pevents;
e.data = w->fd;
e.data.fd = w->fd;
if (w->events == 0)
op = UV__EPOLL_CTL_ADD;
op = EPOLL_CTL_ADD;
else
op = UV__EPOLL_CTL_MOD;
op = EPOLL_CTL_MOD;
/* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
* events, skip the syscall and squelch the events after epoll_wait().
*/
if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
if (errno != EEXIST)
abort();
assert(op == UV__EPOLL_CTL_ADD);
assert(op == EPOLL_CTL_ADD);
/* We've reactivated a file descriptor that's been watched before. */
if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e))
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
abort();
}
w->events = w->pevents;
}
sigmask = 0;
psigset = NULL;
if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
sigemptyset(&sigset);
sigaddset(&sigset, SIGPROF);
sigmask |= 1 << (SIGPROF - 1);
psigset = &sigset;
}
assert(timeout >= -1);
......@@ -273,30 +272,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
timeout = max_safe_timeout;
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
abort();
if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
nfds = uv__epoll_pwait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout,
sigmask);
if (nfds == -1 && errno == ENOSYS)
no_epoll_pwait = 1;
} else {
nfds = uv__epoll_wait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
if (nfds == -1 && errno == ENOSYS)
no_epoll_wait = 1;
}
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
abort();
nfds = epoll_pwait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout,
psigset);
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
......@@ -317,12 +297,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}
if (nfds == -1) {
if (errno == ENOSYS) {
/* epoll_wait() or epoll_pwait() failed, try the other system call. */
assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
continue;
}
if (errno != EINTR)
abort();
......@@ -344,7 +318,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->data;
fd = pe->data.fd;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
......@@ -361,7 +335,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*/
uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe);
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
continue;
}
......@@ -916,6 +890,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses,
continue;
address = *addresses;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
for (i = 0; i < (*count); i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {
......
......@@ -77,56 +77,6 @@
# endif
#endif /* __NR_eventfd2 */
#ifndef __NR_epoll_create
# if defined(__x86_64__)
# define __NR_epoll_create 213
# elif defined(__i386__)
# define __NR_epoll_create 254
# elif defined(__arm__)
# define __NR_epoll_create (UV_SYSCALL_BASE + 250)
# endif
#endif /* __NR_epoll_create */
#ifndef __NR_epoll_create1
# if defined(__x86_64__)
# define __NR_epoll_create1 291
# elif defined(__i386__)
# define __NR_epoll_create1 329
# elif defined(__arm__)
# define __NR_epoll_create1 (UV_SYSCALL_BASE + 357)
# endif
#endif /* __NR_epoll_create1 */
#ifndef __NR_epoll_ctl
# if defined(__x86_64__)
# define __NR_epoll_ctl 233 /* used to be 214 */
# elif defined(__i386__)
# define __NR_epoll_ctl 255
# elif defined(__arm__)
# define __NR_epoll_ctl (UV_SYSCALL_BASE + 251)
# endif
#endif /* __NR_epoll_ctl */
#ifndef __NR_epoll_wait
# if defined(__x86_64__)
# define __NR_epoll_wait 232 /* used to be 215 */
# elif defined(__i386__)
# define __NR_epoll_wait 256
# elif defined(__arm__)
# define __NR_epoll_wait (UV_SYSCALL_BASE + 252)
# endif
#endif /* __NR_epoll_wait */
#ifndef __NR_epoll_pwait
# if defined(__x86_64__)
# define __NR_epoll_pwait 281
# elif defined(__i386__)
# define __NR_epoll_pwait 319
# elif defined(__arm__)
# define __NR_epoll_pwait (UV_SYSCALL_BASE + 346)
# endif
#endif /* __NR_epoll_pwait */
#ifndef __NR_inotify_init
# if defined(__x86_64__)
# define __NR_inotify_init 253
......@@ -285,76 +235,6 @@ int uv__eventfd2(unsigned int count, int flags) {
}
int uv__epoll_create(int size) {
#if defined(__NR_epoll_create)
return syscall(__NR_epoll_create, size);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_create1(int flags) {
#if defined(__NR_epoll_create1)
return syscall(__NR_epoll_create1, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event* events) {
#if defined(__NR_epoll_ctl)
return syscall(__NR_epoll_ctl, epfd, op, fd, events);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_wait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout) {
#if defined(__NR_epoll_wait)
int result;
result = syscall(__NR_epoll_wait, epfd, events, nevents, timeout);
#if MSAN_ACTIVE
if (result > 0)
__msan_unpoison(events, sizeof(events[0]) * result);
#endif
return result;
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_pwait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout,
uint64_t sigmask) {
#if defined(__NR_epoll_pwait)
int result;
result = syscall(__NR_epoll_pwait,
epfd,
events,
nevents,
timeout,
&sigmask,
sizeof(sigmask));
#if MSAN_ACTIVE
if (result > 0)
__msan_unpoison(events, sizeof(events[0]) * result);
#endif
return result;
#else
return errno = ENOSYS, -1;
#endif
}
int uv__inotify_init(void) {
#if defined(__NR_inotify_init)
return syscall(__NR_inotify_init);
......@@ -431,19 +311,6 @@ int uv__recvmmsg(int fd,
}
int uv__utimesat(int dirfd,
const char* path,
const struct timespec times[2],
int flags)
{
#if defined(__NR_utimensat)
return syscall(__NR_utimensat, dirfd, path, times, flags);
#else
return errno = ENOSYS, -1;
#endif
}
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
#if defined(__NR_preadv)
return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
......
......@@ -66,12 +66,6 @@
# define UV__SOCK_NONBLOCK UV__O_NONBLOCK
#endif
/* epoll flags */
#define UV__EPOLL_CLOEXEC UV__O_CLOEXEC
#define UV__EPOLL_CTL_ADD 1
#define UV__EPOLL_CTL_DEL 2
#define UV__EPOLL_CTL_MOD 3
/* inotify flags */
#define UV__IN_ACCESS 0x001
#define UV__IN_MODIFY 0x002
......@@ -86,18 +80,6 @@
#define UV__IN_DELETE_SELF 0x400
#define UV__IN_MOVE_SELF 0x800
#if defined(__x86_64__)
struct uv__epoll_event {
uint32_t events;
uint64_t data;
} __attribute__((packed));
#else
struct uv__epoll_event {
uint32_t events;
uint64_t data;
};
#endif
struct uv__inotify_event {
int32_t wd;
uint32_t mask;
......@@ -113,18 +95,6 @@ struct uv__mmsghdr {
int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags);
int uv__eventfd(unsigned int count);
int uv__epoll_create(int size);
int uv__epoll_create1(int flags);
int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event *ev);
int uv__epoll_wait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout);
int uv__epoll_pwait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout,
uint64_t sigmask);
int uv__eventfd2(unsigned int count, int flags);
int uv__inotify_init(void);
int uv__inotify_init1(int flags);
......@@ -140,10 +110,6 @@ int uv__sendmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags);
int uv__utimesat(int dirfd,
const char* path,
const struct timespec times[2],
int flags);
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
int uv__dup3(int oldfd, int newfd, int flags);
......
......@@ -36,10 +36,6 @@
#define MAX_ITEMS_PER_EPOLL 1024
#define UV__O_CLOEXEC 0x80000
#define UV__EPOLL_CLOEXEC UV__O_CLOEXEC
#define UV__EPOLL_CTL_ADD EPOLL_CTL_ADD
#define UV__EPOLL_CTL_DEL EPOLL_CTL_DEL
#define UV__EPOLL_CTL_MOD EPOLL_CTL_MOD
struct epoll_event {
int events;
......
......@@ -512,7 +512,7 @@ static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
/* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */
address->is_internal = flg.__nif6e_flags & _NIF6E_FLAGS_LOOPBACK ? 1 : 0;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
address++;
}
......@@ -624,6 +624,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
}
address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
address++;
}
......@@ -662,7 +663,7 @@ void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
/* Remove the file descriptor from the epoll. */
if (loop->ep != NULL)
epoll_ctl(loop->ep, UV__EPOLL_CTL_DEL, fd, &dummy);
epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, &dummy);
}
......@@ -838,9 +839,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
e.fd = w->fd;
if (w->events == 0)
op = UV__EPOLL_CTL_ADD;
op = EPOLL_CTL_ADD;
else
op = UV__EPOLL_CTL_MOD;
op = EPOLL_CTL_MOD;
/* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
* events, skip the syscall and squelch the events after epoll_wait().
......@@ -849,10 +850,10 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (errno != EEXIST)
abort();
assert(op == UV__EPOLL_CTL_ADD);
assert(op == EPOLL_CTL_ADD);
/* We've reactivated a file descriptor that's been watched before. */
if (epoll_ctl(loop->ep, UV__EPOLL_CTL_MOD, w->fd, &e))
if (epoll_ctl(loop->ep, EPOLL_CTL_MOD, w->fd, &e))
abort();
}
......@@ -934,7 +935,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*/
epoll_ctl(loop->ep, UV__EPOLL_CTL_DEL, fd, pe);
epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, pe);
continue;
}
......
......@@ -132,11 +132,21 @@ void uv__pipe_close(uv_pipe_t* handle) {
int uv_pipe_open(uv_pipe_t* handle, uv_file fd) {
int flags;
int mode;
int err;
flags = 0;
if (uv__fd_exists(handle->loop, fd))
return UV_EEXIST;
do
mode = fcntl(fd, F_GETFL);
while (mode == -1 && errno == EINTR);
if (mode == -1)
return UV__ERR(errno); /* according to docs, must be EBADF */
err = uv__nonblock(fd, 1);
if (err)
return err;
......@@ -147,9 +157,13 @@ int uv_pipe_open(uv_pipe_t* handle, uv_file fd) {
return err;
#endif /* defined(__APPLE__) */
return uv__stream_open((uv_stream_t*)handle,
fd,
UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
mode &= O_ACCMODE;
if (mode != O_WRONLY)
flags |= UV_HANDLE_READABLE;
if (mode != O_RDONLY)
flags |= UV_HANDLE_WRITABLE;
return uv__stream_open((uv_stream_t*)handle, fd, flags);
}
......
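With this change `uv_pipe_open()` inspects the descriptor's access mode via `fcntl(F_GETFL)` and marks the handle readable and/or writable accordingly, instead of unconditionally assuming both directions. A hedged caller-side sketch of what that means when wrapping one end of a `pipe(2)` pair (not part of the diff):

```c
#include <unistd.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_pipe_t reader;
  int fds[2];

  if (pipe(fds))
    return 1;

  uv_pipe_init(loop, &reader, 0);

  /* fds[0] was opened read-only, so after this call the handle is flagged
   * readable but not writable; attempts to uv_write() on it are rejected. */
  uv_pipe_open(&reader, fds[0]);

  /* ... set up uv_read_start() on &reader, write into fds[1], etc. ... */

  close(fds[1]);
  return uv_run(loop, UV_RUN_DEFAULT);
}
```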
......@@ -950,10 +950,16 @@ error:
static void uv__write_callbacks(uv_stream_t* stream) {
uv_write_t* req;
QUEUE* q;
QUEUE pq;
while (!QUEUE_EMPTY(&stream->write_completed_queue)) {
if (QUEUE_EMPTY(&stream->write_completed_queue))
return;
QUEUE_MOVE(&stream->write_completed_queue, &pq);
while (!QUEUE_EMPTY(&pq)) {
/* Pop a req off write_completed_queue. */
q = QUEUE_HEAD(&stream->write_completed_queue);
q = QUEUE_HEAD(&pq);
req = QUEUE_DATA(q, uv_write_t, queue);
QUEUE_REMOVE(q);
uv__req_unregister(stream->loop, req);
......@@ -969,8 +975,6 @@ static void uv__write_callbacks(uv_stream_t* stream) {
if (req->cb)
req->cb(req, req->error);
}
assert(QUEUE_EMPTY(&stream->write_completed_queue));
}
......@@ -1672,6 +1676,7 @@ void uv__stream_close(uv_stream_t* handle) {
uv__io_close(handle->loop, &handle->io_watcher);
uv_read_stop(handle);
uv__handle_stop(handle);
handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
if (handle->io_watcher.fd != -1) {
/* Don't close stdio file descriptors. Nothing good comes from it. */
......
......@@ -44,108 +44,119 @@
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
#endif
#if defined(UV__PTHREAD_BARRIER_FALLBACK)
/* TODO: support barrier_attr */
int pthread_barrier_init(pthread_barrier_t* barrier,
const void* barrier_attr,
unsigned count) {
/* Note: guard clauses should match uv_barrier_t's in include/uv/uv-unix.h. */
#if defined(_AIX) || !defined(PTHREAD_BARRIER_SERIAL_THREAD)
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
struct _uv_barrier* b;
int rc;
_uv_barrier* b;
if (barrier == NULL || count == 0)
return EINVAL;
if (barrier_attr != NULL)
return ENOTSUP;
return UV_EINVAL;
b = uv__malloc(sizeof(*b));
if (b == NULL)
return ENOMEM;
return UV_ENOMEM;
b->in = 0;
b->out = 0;
b->threshold = count;
if ((rc = pthread_mutex_init(&b->mutex, NULL)) != 0)
rc = uv_mutex_init(&b->mutex);
if (rc != 0)
goto error2;
if ((rc = pthread_cond_init(&b->cond, NULL)) != 0)
rc = uv_cond_init(&b->cond);
if (rc != 0)
goto error;
barrier->b = b;
return 0;
error:
pthread_mutex_destroy(&b->mutex);
uv_mutex_destroy(&b->mutex);
error2:
uv__free(b);
return rc;
}
int pthread_barrier_wait(pthread_barrier_t* barrier) {
int rc;
_uv_barrier* b;
int uv_barrier_wait(uv_barrier_t* barrier) {
struct _uv_barrier* b;
int last;
if (barrier == NULL || barrier->b == NULL)
return EINVAL;
return UV_EINVAL;
b = barrier->b;
/* Lock the mutex*/
if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
return rc;
uv_mutex_lock(&b->mutex);
/* Increment the count. If this is the first thread to reach the threshold,
wake up waiters, unlock the mutex, then return
PTHREAD_BARRIER_SERIAL_THREAD. */
if (++b->in == b->threshold) {
b->in = 0;
b->out = b->threshold - 1;
rc = pthread_cond_signal(&b->cond);
assert(rc == 0);
pthread_mutex_unlock(&b->mutex);
return PTHREAD_BARRIER_SERIAL_THREAD;
b->out = b->threshold;
uv_cond_signal(&b->cond);
} else {
do
uv_cond_wait(&b->cond, &b->mutex);
while (b->in != 0);
}
/* Otherwise, wait for other threads until in is set to 0,
then return 0 to indicate this is not the first thread. */
do {
if ((rc = pthread_cond_wait(&b->cond, &b->mutex)) != 0)
break;
} while (b->in != 0);
/* mark thread exit */
b->out--;
pthread_cond_signal(&b->cond);
pthread_mutex_unlock(&b->mutex);
return rc;
last = (--b->out == 0);
if (!last)
uv_cond_signal(&b->cond); /* Not needed for last thread. */
uv_mutex_unlock(&b->mutex);
return last;
}
int pthread_barrier_destroy(pthread_barrier_t* barrier) {
int rc;
_uv_barrier* b;
if (barrier == NULL || barrier->b == NULL)
return EINVAL;
void uv_barrier_destroy(uv_barrier_t* barrier) {
struct _uv_barrier* b;
b = barrier->b;
uv_mutex_lock(&b->mutex);
if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
return rc;
assert(b->in == 0);
assert(b->out == 0);
if (b->in > 0 || b->out > 0)
rc = EBUSY;
pthread_mutex_unlock(&b->mutex);
if (b->in != 0 || b->out != 0)
abort();
if (rc)
return rc;
uv_mutex_unlock(&b->mutex);
uv_mutex_destroy(&b->mutex);
uv_cond_destroy(&b->cond);
pthread_cond_destroy(&b->cond);
pthread_mutex_destroy(&b->mutex);
uv__free(barrier->b);
barrier->b = NULL;
return 0;
}
#else
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
return UV__ERR(pthread_barrier_init(barrier, NULL, count));
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int rc;
rc = pthread_barrier_wait(barrier);
if (rc != 0)
if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
abort();
return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}
#endif
......@@ -771,25 +782,6 @@ int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
}
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
return UV__ERR(pthread_barrier_init(barrier, NULL, count));
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int r = pthread_barrier_wait(barrier);
if (r && r != PTHREAD_BARRIER_SERIAL_THREAD)
abort();
return r == PTHREAD_BARRIER_SERIAL_THREAD;
}
int uv_key_create(uv_key_t* key) {
return UV__ERR(pthread_key_create(key, NULL));
}
......
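The barrier rework above preserves the documented contract: `uv_barrier_wait()` returns a value greater than zero in exactly one of the waiting threads, and (thanks to this release's "signal done to last thread barrier waiter" fix) that thread can safely destroy the barrier right away, which is the usage pattern the libuv documentation shows. A hedged sketch exercising the API with a few worker threads:

```c
#include <stdint.h>
#include <stdio.h>
#include <uv.h>

#define NUM_THREADS 4

static uv_barrier_t barrier;

static void worker(void* arg) {
  /* Every thread blocks here until NUM_THREADS + 1 waiters have arrived.
   * Exactly one waiter gets a non-zero return and tears the barrier down. */
  if (uv_barrier_wait(&barrier) > 0)
    uv_barrier_destroy(&barrier);

  printf("thread %d released\n", (int) (intptr_t) arg);
}

int main(void) {
  uv_thread_t threads[NUM_THREADS];
  int i;

  uv_barrier_init(&barrier, NUM_THREADS + 1);  /* workers plus the main thread */

  for (i = 0; i < NUM_THREADS; i++)
    uv_thread_create(&threads[i], worker, (void*) (intptr_t) i);

  if (uv_barrier_wait(&barrier) > 0)
    uv_barrier_destroy(&barrier);

  for (i = 0; i < NUM_THREADS; i++)
    uv_thread_join(&threads[i]);

  return 0;
}
```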
......@@ -92,13 +92,15 @@ static int uv__tty_is_slave(const int fd) {
return result;
}
int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int readable) {
int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int unused) {
uv_handle_type type;
int flags;
int newfd;
int r;
int saved_flags;
int mode;
char path[256];
(void)unused; /* deprecated parameter is no longer needed */
/* File descriptors that refer to files cannot be monitored with epoll.
* That restriction also applies to character devices like /dev/random
......@@ -111,6 +113,15 @@ int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int readable) {
flags = 0;
newfd = -1;
/* Save the fd flags in case we need to restore them due to an error. */
do
saved_flags = fcntl(fd, F_GETFL);
while (saved_flags == -1 && errno == EINTR);
if (saved_flags == -1)
return UV__ERR(errno);
mode = saved_flags & O_ACCMODE;
/* Reopen the file descriptor when it refers to a tty. This lets us put the
* tty in non-blocking mode without affecting other processes that share it
* with us.
......@@ -128,13 +139,13 @@ int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int readable) {
* slave device.
*/
if (uv__tty_is_slave(fd) && ttyname_r(fd, path, sizeof(path)) == 0)
r = uv__open_cloexec(path, O_RDWR);
r = uv__open_cloexec(path, mode);
else
r = -1;
if (r < 0) {
/* fallback to using blocking writes */
if (!readable)
if (mode != O_RDONLY)
flags |= UV_HANDLE_BLOCKING_WRITES;
goto skip;
}
......@@ -154,22 +165,6 @@ int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int readable) {
fd = newfd;
}
#if defined(__APPLE__)
/* Save the fd flags in case we need to restore them due to an error. */
do
saved_flags = fcntl(fd, F_GETFL);
while (saved_flags == -1 && errno == EINTR);
if (saved_flags == -1) {
if (newfd != -1)
uv__close(newfd);
return UV__ERR(errno);
}
#endif
/* Pacify the compiler. */
(void) &saved_flags;
skip:
uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY);
......@@ -194,9 +189,9 @@ skip:
}
#endif
if (readable)
if (mode != O_WRONLY)
flags |= UV_HANDLE_READABLE;
else
if (mode != O_RDONLY)
flags |= UV_HANDLE_WRITABLE;
uv__stream_open((uv_stream_t*) tty, fd, flags);
......
......@@ -164,8 +164,15 @@ void uv__fs_poll_close(uv_fs_poll_t* handle);
int uv__getaddrinfo_translate_error(int sys_err); /* EAI_* error. */
enum uv__work_kind {
UV__WORK_CPU,
UV__WORK_FAST_IO,
UV__WORK_SLOW_IO
};
void uv__work_submit(uv_loop_t* loop,
struct uv__work *w,
enum uv__work_kind kind,
void (*work)(struct uv__work *w),
void (*done)(struct uv__work *w, int status));
......
......@@ -249,8 +249,10 @@ int uv_loop_init(uv_loop_t* loop) {
loop->endgame_handles = NULL;
loop->timer_heap = timer_heap = uv__malloc(sizeof(*timer_heap));
if (timer_heap == NULL)
if (timer_heap == NULL) {
err = UV_ENOMEM;
goto fail_timers_alloc;
}
heap_init(timer_heap);
......@@ -379,6 +381,57 @@ int uv_backend_timeout(const uv_loop_t* loop) {
}
static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
DWORD bytes;
ULONG_PTR key;
OVERLAPPED* overlapped;
uv_req_t* req;
int repeat;
uint64_t timeout_time;
timeout_time = loop->time + timeout;
for (repeat = 0; ; repeat++) {
GetQueuedCompletionStatus(loop->iocp,
&bytes,
&key,
&overlapped,
timeout);
if (overlapped) {
/* Package was dequeued */
req = uv_overlapped_to_req(overlapped);
uv_insert_pending_req(loop, req);
/* Some time might have passed waiting for I/O,
* so update the loop time here.
*/
uv_update_time(loop);
} else if (GetLastError() != WAIT_TIMEOUT) {
/* Serious error */
uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
} else if (timeout > 0) {
/* GetQueuedCompletionStatus can occasionally return a little early.
* Make sure that the desired timeout target time is reached.
*/
uv_update_time(loop);
if (timeout_time > loop->time) {
timeout = (DWORD)(timeout_time - loop->time);
/* The first call to GetQueuedCompletionStatus should return very
* close to the target time and the second should reach it, but
* this is not stated in the documentation. To make sure a busy
* loop cannot happen, the timeout is increased exponentially
* starting on the third round.
*/
timeout += repeat ? (1 << (repeat - 1)) : 0;
continue;
}
}
break;
}
}
static void uv__poll(uv_loop_t* loop, DWORD timeout) {
BOOL success;
uv_req_t* req;
......@@ -471,7 +524,11 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
timeout = uv_backend_timeout(loop);
uv__poll(loop, timeout);
if (pGetQueuedCompletionStatusEx)
uv__poll(loop, timeout);
else
uv__poll_wine(loop, timeout);
uv_check_invoke(loop);
uv_process_endgames(loop);
......
......@@ -83,7 +83,7 @@ static void uv_relative_path(const WCHAR* filename,
static int uv_split_path(const WCHAR* filename, WCHAR** dir,
WCHAR** file) {
size_t len, i;
if (filename == NULL) {
if (dir != NULL)
*dir = NULL;
......
......@@ -55,7 +55,11 @@
do { \
if (cb != NULL) { \
uv__req_register(loop, req); \
uv__work_submit(loop, &req->work_req, uv__fs_work, uv__fs_done); \
uv__work_submit(loop, \
&req->work_req, \
UV__WORK_FAST_IO, \
uv__fs_work, \
uv__fs_done); \
return 0; \
} else { \
uv__fs_work(&req->work_req); \
......@@ -1513,10 +1517,10 @@ static void fs__fchmod(uv_fs_t* req) {
SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
goto fchmod_cleanup;
}
/* Test if the Archive attribute is cleared */
if ((file_info.FileAttributes & FILE_ATTRIBUTE_ARCHIVE) == 0) {
/* Set Archive flag, otherwise setting or clearing the read-only
/* Set Archive flag, otherwise setting or clearing the read-only
flag will not work */
file_info.FileAttributes |= FILE_ATTRIBUTE_ARCHIVE;
nt_status = pNtSetInformationFile(handle,
......
......@@ -368,6 +368,7 @@ int uv_getaddrinfo(uv_loop_t* loop,
if (getaddrinfo_cb) {
uv__work_submit(loop,
&req->work_req,
UV__WORK_SLOW_IO,
uv__getaddrinfo_work,
uv__getaddrinfo_done);
return 0;
......
......@@ -42,7 +42,7 @@ static void uv__getnameinfo_work(struct uv__work* w) {
uv_getnameinfo_t* req;
WCHAR host[NI_MAXHOST];
WCHAR service[NI_MAXSERV];
int ret = 0;
int ret;
req = container_of(w, uv_getnameinfo_t, work_req);
if (GetNameInfoW((struct sockaddr*)&req->storage,
......@@ -53,27 +53,34 @@ static void uv__getnameinfo_work(struct uv__work* w) {
ARRAY_SIZE(service),
req->flags)) {
ret = WSAGetLastError();
req->retcode = uv__getaddrinfo_translate_error(ret);
return;
}
ret = WideCharToMultiByte(CP_UTF8,
0,
host,
-1,
req->host,
sizeof(req->host),
NULL,
NULL);
if (ret == 0) {
req->retcode = uv_translate_sys_error(GetLastError());
return;
}
ret = WideCharToMultiByte(CP_UTF8,
0,
service,
-1,
req->service,
sizeof(req->service),
NULL,
NULL);
if (ret == 0) {
req->retcode = uv_translate_sys_error(GetLastError());
}
req->retcode = uv__getaddrinfo_translate_error(ret);
/* convert results to UTF-8 */
WideCharToMultiByte(CP_UTF8,
0,
host,
-1,
req->host,
sizeof(req->host),
NULL,
NULL);
WideCharToMultiByte(CP_UTF8,
0,
service,
-1,
req->service,
sizeof(req->service),
NULL,
NULL);
}
......@@ -138,6 +145,7 @@ int uv_getnameinfo(uv_loop_t* loop,
if (getnameinfo_cb) {
uv__work_submit(loop,
&req->work_req,
UV__WORK_SLOW_IO,
uv__getnameinfo_work,
uv__getnameinfo_done);
return 0;
......
......@@ -157,3 +157,7 @@ int uv_is_closing(const uv_handle_t* handle) {
uv_os_fd_t uv_get_osfhandle(int fd) {
return uv__get_osfhandle(fd);
}
int uv_open_osfhandle(uv_os_fd_t os_fd) {
return _open_osfhandle((intptr_t) os_fd, 0);
}
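`uv_open_osfhandle()` is the inverse of `uv_get_osfhandle()`: on Windows it wraps `_open_osfhandle()` to create a CRT file descriptor for an existing OS handle, while on Unix (as shown earlier in this diff) it simply returns the value unchanged. A small, purely illustrative round-trip sketch:

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  /* Take the descriptor for stdout, convert it to an OS-level handle, then
   * open a new CRT descriptor on that same handle. On Unix both calls are
   * identity mappings, so fd is simply 1 again. */
  uv_os_fd_t handle = uv_get_osfhandle(1);
  int fd = uv_open_osfhandle(handle);

  if (fd < 0) {
    fprintf(stderr, "uv_open_osfhandle failed\n");
    return 1;
  }

  printf("new descriptor: %d\n", fd);
  return 0;
}
```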
......@@ -61,10 +61,15 @@ extern UV_THREAD_LOCAL int uv__crt_assert_enabled;
* TCP
*/
typedef enum {
UV__IPC_SOCKET_XFER_NONE = 0,
UV__IPC_SOCKET_XFER_TCP_CONNECTION,
UV__IPC_SOCKET_XFER_TCP_SERVER
} uv__ipc_socket_xfer_type_t;
typedef struct {
WSAPROTOCOL_INFOW socket_info;
uint32_t delayed_error;
uint32_t flags; /* Either zero or UV_HANDLE_CONNECTION. */
} uv__ipc_socket_xfer_info_t;
int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
......@@ -89,8 +94,11 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
int uv__tcp_xfer_export(uv_tcp_t* handle,
int pid,
uv__ipc_socket_xfer_type_t* xfer_type,
uv__ipc_socket_xfer_info_t* xfer_info);
int uv__tcp_xfer_import(uv_tcp_t* tcp,
uv__ipc_socket_xfer_type_t xfer_type,
uv__ipc_socket_xfer_info_t* xfer_info);
int uv__tcp_xfer_import(uv_tcp_t* tcp, uv__ipc_socket_xfer_info_t* xfer_info);
/*
......
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
......@@ -1191,8 +1191,12 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
int uv__tcp_xfer_export(uv_tcp_t* handle,
int target_pid,
uv__ipc_socket_xfer_type_t* xfer_type,
uv__ipc_socket_xfer_info_t* xfer_info) {
if (!(handle->flags & UV_HANDLE_CONNECTION)) {
if (handle->flags & UV_HANDLE_CONNECTION) {
*xfer_type = UV__IPC_SOCKET_XFER_TCP_CONNECTION;
} else {
*xfer_type = UV__IPC_SOCKET_XFER_TCP_SERVER;
/* We're about to share the socket with another process. Because this is a
* listening socket, we assume that the other process will be accepting
* connections on it. Thus, before sharing the socket with another process,
......@@ -1208,12 +1212,9 @@ int uv__tcp_xfer_export(uv_tcp_t* handle,
}
}
if (WSADuplicateSocketW(
handle->socket, target_pid, &xfer_info->socket_info)) {
if (WSADuplicateSocketW(handle->socket, target_pid, &xfer_info->socket_info))
return WSAGetLastError();
}
xfer_info->delayed_error = handle->delayed_error;
xfer_info->flags = handle->flags & UV_HANDLE_CONNECTION;
/* Mark the local copy of the handle as 'shared' so we behave in a way that's
* friendly to the process(es) that we share the socket with. */
......@@ -1223,14 +1224,21 @@ int uv__tcp_xfer_export(uv_tcp_t* handle,
}
int uv__tcp_xfer_import(uv_tcp_t* tcp, uv__ipc_socket_xfer_info_t* xfer_info) {
int uv__tcp_xfer_import(uv_tcp_t* tcp,
uv__ipc_socket_xfer_type_t xfer_type,
uv__ipc_socket_xfer_info_t* xfer_info) {
int err;
SOCKET socket = WSASocketW(FROM_PROTOCOL_INFO,
FROM_PROTOCOL_INFO,
FROM_PROTOCOL_INFO,
&xfer_info->socket_info,
0,
WSA_FLAG_OVERLAPPED);
SOCKET socket;
assert(xfer_type == UV__IPC_SOCKET_XFER_TCP_SERVER ||
xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION);
socket = WSASocketW(FROM_PROTOCOL_INFO,
FROM_PROTOCOL_INFO,
FROM_PROTOCOL_INFO,
&xfer_info->socket_info,
0,
WSA_FLAG_OVERLAPPED);
if (socket == INVALID_SOCKET) {
return WSAGetLastError();
......@@ -1246,7 +1254,7 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp, uv__ipc_socket_xfer_info_t* xfer_info) {
tcp->delayed_error = xfer_info->delayed_error;
tcp->flags |= UV_HANDLE_BOUND | UV_HANDLE_SHARED_TCP_SOCKET;
if (xfer_info->flags & UV_HANDLE_CONNECTION) {
if (xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION) {
uv_connection_init((uv_stream_t*)tcp);
tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
}
......
......@@ -118,7 +118,7 @@ int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
ctx->arg = arg;
/* Create the thread in suspended state so we have a chance to pass
* its own creation handle to it */
* its own creation handle to it */
thread = (HANDLE) _beginthreadex(NULL,
0,
uv__thread_start,
......
......@@ -172,9 +172,12 @@ void uv_console_init(void) {
}
int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int readable) {
int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int unused) {
BOOL readable;
DWORD NumberOfEvents;
HANDLE handle;
CONSOLE_SCREEN_BUFFER_INFO screen_buffer_info;
(void)unused;
uv__once_init();
handle = (HANDLE) uv__get_osfhandle(fd);
......@@ -199,6 +202,7 @@ int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int readable) {
fd = -1;
}
readable = GetNumberOfConsoleInputEvents(handle, &NumberOfEvents);
if (!readable) {
/* Obtain the screen buffer info with the output handle. */
if (!GetConsoleScreenBufferInfo(handle, &screen_buffer_info)) {
......@@ -382,12 +386,6 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
}
int uv_is_tty(uv_file file) {
DWORD result;
return GetConsoleMode((HANDLE) _get_osfhandle(file), &result) != 0;
}
int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
CONSOLE_SCREEN_BUFFER_INFO info;
......@@ -1035,6 +1033,7 @@ int uv_tty_read_stop(uv_tty_t* handle) {
/* Cancel raw read. Write some bullshit event to force the console wait to
* return. */
memset(&record, 0, sizeof record);
record.EventType = FOCUS_EVENT;
if (!WriteConsoleInputW(handle->handle, &record, 1, &written)) {
return GetLastError();
}
......@@ -2181,14 +2180,14 @@ void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
void uv_tty_close(uv_tty_t* handle) {
assert(handle->u.fd == -1 || handle->u.fd > 2);
if (handle->flags & UV_HANDLE_READING)
uv_tty_read_stop(handle);
if (handle->u.fd == -1)
CloseHandle(handle->handle);
else
close(handle->u.fd);
if (handle->flags & UV_HANDLE_READING)
uv_tty_read_stop(handle);
handle->u.fd = -1;
handle->handle = INVALID_HANDLE_VALUE;
handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
......
......@@ -366,7 +366,7 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
int err;
if (handle->flags & UV_HANDLE_READING) {
return WSAEALREADY;
return UV_EALREADY;
}
err = uv_udp_maybe_bind(handle,
......@@ -374,7 +374,7 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
sizeof(uv_addr_ip4_any_),
0);
if (err)
return err;
return uv_translate_sys_error(err);
handle->flags |= UV_HANDLE_READING;
INCREASE_ACTIVE_COUNT(loop, handle);
......
......@@ -1530,3 +1530,97 @@ int uv_os_gethostname(char* buffer, size_t* size) {
*size = len;
return 0;
}
static int uv__get_handle(uv_pid_t pid, int access, HANDLE* handle) {
int r;
if (pid == 0)
*handle = GetCurrentProcess();
else
*handle = OpenProcess(access, FALSE, pid);
if (*handle == NULL) {
r = GetLastError();
if (r == ERROR_INVALID_PARAMETER)
return UV_ESRCH;
else
return uv_translate_sys_error(r);
}
return 0;
}
int uv_os_getpriority(uv_pid_t pid, int* priority) {
HANDLE handle;
int r;
if (priority == NULL)
return UV_EINVAL;
r = uv__get_handle(pid, PROCESS_QUERY_LIMITED_INFORMATION, &handle);
if (r != 0)
return r;
r = GetPriorityClass(handle);
if (r == 0) {
r = uv_translate_sys_error(GetLastError());
} else {
/* Map Windows priority classes to Unix nice values. */
if (r == REALTIME_PRIORITY_CLASS)
*priority = UV_PRIORITY_HIGHEST;
else if (r == HIGH_PRIORITY_CLASS)
*priority = UV_PRIORITY_HIGH;
else if (r == ABOVE_NORMAL_PRIORITY_CLASS)
*priority = UV_PRIORITY_ABOVE_NORMAL;
else if (r == NORMAL_PRIORITY_CLASS)
*priority = UV_PRIORITY_NORMAL;
else if (r == BELOW_NORMAL_PRIORITY_CLASS)
*priority = UV_PRIORITY_BELOW_NORMAL;
else /* IDLE_PRIORITY_CLASS */
*priority = UV_PRIORITY_LOW;
r = 0;
}
CloseHandle(handle);
return r;
}
int uv_os_setpriority(uv_pid_t pid, int priority) {
HANDLE handle;
int priority_class;
int r;
/* Map Unix nice values to Windows priority classes. */
if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
return UV_EINVAL;
else if (priority < UV_PRIORITY_HIGH)
priority_class = REALTIME_PRIORITY_CLASS;
else if (priority < UV_PRIORITY_ABOVE_NORMAL)
priority_class = HIGH_PRIORITY_CLASS;
else if (priority < UV_PRIORITY_NORMAL)
priority_class = ABOVE_NORMAL_PRIORITY_CLASS;
else if (priority < UV_PRIORITY_BELOW_NORMAL)
priority_class = NORMAL_PRIORITY_CLASS;
else if (priority < UV_PRIORITY_LOW)
priority_class = BELOW_NORMAL_PRIORITY_CLASS;
else
priority_class = IDLE_PRIORITY_CLASS;
r = uv__get_handle(pid, PROCESS_SET_INFORMATION, &handle);
if (r != 0)
return r;
if (SetPriorityClass(handle, priority_class) == 0)
r = uv_translate_sys_error(GetLastError());
CloseHandle(handle);
return r;
}
......@@ -34,6 +34,9 @@ sNtQueryVolumeInformationFile pNtQueryVolumeInformationFile;
sNtQueryDirectoryFile pNtQueryDirectoryFile;
sNtQuerySystemInformation pNtQuerySystemInformation;
/* Kernel32 function pointers */
sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx;
/* Powrprof.dll function pointer */
sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotification;
......@@ -45,6 +48,7 @@ void uv_winapi_init(void) {
HMODULE ntdll_module;
HMODULE powrprof_module;
HMODULE user32_module;
HMODULE kernel32_module;
ntdll_module = GetModuleHandleA("ntdll.dll");
if (ntdll_module == NULL) {
......@@ -98,6 +102,15 @@ void uv_winapi_init(void) {
uv_fatal_error(GetLastError(), "GetProcAddress");
}
kernel32_module = GetModuleHandleA("kernel32.dll");
if (kernel32_module == NULL) {
uv_fatal_error(GetLastError(), "GetModuleHandleA");
}
pGetQueuedCompletionStatusEx = (sGetQueuedCompletionStatusEx) GetProcAddress(
kernel32_module,
"GetQueuedCompletionStatusEx");
powrprof_module = LoadLibraryA("powrprof.dll");
if (powrprof_module != NULL) {
pPowerRegisterSuspendResumeNotification = (sPowerRegisterSuspendResumeNotification)
......
......@@ -4642,6 +4642,14 @@ typedef NTSTATUS (NTAPI *sNtQueryDirectoryFile)
# define ERROR_MUI_FILE_NOT_LOADED 15105
#endif
typedef BOOL (WINAPI *sGetQueuedCompletionStatusEx)
(HANDLE CompletionPort,
LPOVERLAPPED_ENTRY lpCompletionPortEntries,
ULONG ulCount,
PULONG ulNumEntriesRemoved,
DWORD dwMilliseconds,
BOOL fAlertable);
/* from powerbase.h */
#ifndef DEVICE_NOTIFY_CALLBACK
# define DEVICE_NOTIFY_CALLBACK 2
......@@ -4704,6 +4712,9 @@ extern sNtQueryVolumeInformationFile pNtQueryVolumeInformationFile;
extern sNtQueryDirectoryFile pNtQueryDirectoryFile;
extern sNtQuerySystemInformation pNtQuerySystemInformation;
/* Kernel32 function pointers */
extern sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx;
/* Powrprof.dll function pointer */
extern sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotification;
......
......@@ -116,7 +116,6 @@
'src/win/poll.c',
'src/win/process.c',
'src/win/process-stdio.c',
'src/win/req.c',
'src/win/req-inl.h',
'src/win/signal.c',
'src/win/snprintf.c',
......
......@@ -111,7 +111,6 @@ if WIN:
_libuv_source('win/poll.c'),
_libuv_source('win/process-stdio.c'),
_libuv_source('win/process.c'),
_libuv_source('win/req.c'),
_libuv_source('win/signal.c'),
_libuv_source('win/snprintf.c'),
_libuv_source('win/stream.c'),
......