summaryrefslogtreecommitdiff
path: root/deps
diff options
context:
space:
mode:
authorRyan Dahl <ry@tinyclouds.org>2011-06-07 18:37:27 +0200
committerRyan Dahl <ry@tinyclouds.org>2011-06-07 18:59:44 +0200
commit7a5977b5d61186baaa71b19349f3fdfa41e6042e (patch)
treedcfd59c4e288b38696247213f64bf17a28b6c566 /deps
parent794cb60f9e8689ec6e163aa1119fdbb07d3024eb (diff)
downloadandroid-node-v8-7a5977b5d61186baaa71b19349f3fdfa41e6042e.tar.gz
android-node-v8-7a5977b5d61186baaa71b19349f3fdfa41e6042e.tar.bz2
android-node-v8-7a5977b5d61186baaa71b19349f3fdfa41e6042e.zip
Upgrade libuv to e58a1abff02d7bacf89a56de9050e27690a97bc5
Diffstat (limited to 'deps')
-rw-r--r--deps/uv/AUTHORS1
-rw-r--r--deps/uv/README15
-rw-r--r--deps/uv/config-mingw.mk2
-rw-r--r--deps/uv/config-unix.mk2
-rw-r--r--deps/uv/desired-api.md171
-rw-r--r--deps/uv/msvs/libuv-test.vcxproj2
-rw-r--r--deps/uv/test/benchmark-list.h10
-rw-r--r--deps/uv/test/benchmark-ping-pongs.c38
-rw-r--r--deps/uv/test/benchmark-pump.c229
-rw-r--r--deps/uv/test/benchmark-sizes.c7
-rw-r--r--deps/uv/test/echo-server.c73
-rw-r--r--deps/uv/test/runner-unix.c6
-rw-r--r--deps/uv/test/runner-win.c2
-rw-r--r--deps/uv/test/runner.c9
-rw-r--r--deps/uv/test/runner.h2
-rw-r--r--deps/uv/test/test-async.c17
-rw-r--r--deps/uv/test/test-bind-error.c56
-rw-r--r--deps/uv/test/test-callback-stack.c37
-rw-r--r--deps/uv/test/test-connection-fail.c93
-rw-r--r--deps/uv/test/test-delayed-accept.c64
-rw-r--r--deps/uv/test/test-get-currentexe.c53
-rw-r--r--deps/uv/test/test-list.h9
-rw-r--r--deps/uv/test/test-loop-handles.c79
-rw-r--r--deps/uv/test/test-ping-pong.c39
-rw-r--r--deps/uv/test/test-shutdown-eof.c176
-rw-r--r--deps/uv/test/test-tcp-writealot.c48
-rw-r--r--deps/uv/test/test-timer-again.c24
-rw-r--r--deps/uv/test/test-timer.c15
-rw-r--r--deps/uv/uv-unix.c755
-rw-r--r--deps/uv/uv-unix.h47
-rw-r--r--deps/uv/uv-win.c591
-rw-r--r--deps/uv/uv-win.h40
-rw-r--r--deps/uv/uv.h273
33 files changed, 1919 insertions, 1066 deletions
diff --git a/deps/uv/AUTHORS b/deps/uv/AUTHORS
index 7cfecd0d3a..47b47d29aa 100644
--- a/deps/uv/AUTHORS
+++ b/deps/uv/AUTHORS
@@ -3,3 +3,4 @@ Ryan Dahl <ryan@joyent.com>
Bert Belder <bertbelder@gmail.com>
Josh Roesslein <jroesslein@gmail.com>
Alan Gutierrez <alan@prettyrobots.com>
+Vanilla Hsu <vanilla@fatpipi.com>
diff --git a/deps/uv/README b/deps/uv/README
index 38bb8a68f4..4e2c2cd261 100644
--- a/deps/uv/README
+++ b/deps/uv/README
@@ -6,7 +6,7 @@ http://nodejs.org/
(This was previously called liboio)
-## Supported Platforms
+Supported Platforms:
Microsoft Windows operating systems since Windows XP sp2. It can be built
with either Visual Studio or MinGW.
@@ -14,16 +14,3 @@ with either Visual Studio or MinGW.
Linux 2.6 and MacOS using the GCC toolchain.
Solaris 121 and later using GCC toolchain.
-
-## Design
-
-The goal of this library is to provide high-concurrency high-performance I/O
-on all operating systems. This is a large undertaking. Some of the API
-decisions may seem arbitrary but are actually borne out of various specific
-platform issues.
-
-## uv_read_start(), uv_read_stop()
-
-Originally we wanted to provide a asynchronous read function that was
-similar to WSARecv().
-
diff --git a/deps/uv/config-mingw.mk b/deps/uv/config-mingw.mk
index 7cd59d0f25..8d62eadfe8 100644
--- a/deps/uv/config-mingw.mk
+++ b/deps/uv/config-mingw.mk
@@ -24,7 +24,7 @@ CC = $(PREFIX)gcc
AR = $(PREFIX)ar
E=.exe
-CFLAGS=-g --std=gnu89
+CFLAGS=-g --std=gnu89 -pedantic -Wno-variadic-macros
LINKFLAGS=-lm
RUNNER_CFLAGS=$(CFLAGS) -D_GNU_SOURCE # Need _GNU_SOURCE for strdup?
diff --git a/deps/uv/config-unix.mk b/deps/uv/config-unix.mk
index af8c7d8157..baa881193c 100644
--- a/deps/uv/config-unix.mk
+++ b/deps/uv/config-unix.mk
@@ -21,7 +21,7 @@
CC = $(PREFIX)gcc
AR = $(PREFIX)ar
E=
-CFLAGS=--std=gnu89 -g -DEV_MULTIPLICITY=0
+CFLAGS=--std=gnu89 -pedantic -Wno-variadic-macros -g -DEV_MULTIPLICITY=0
LINKFLAGS=-lm
ifeq (SunOS,$(uname_S))
diff --git a/deps/uv/desired-api.md b/deps/uv/desired-api.md
new file mode 100644
index 0000000000..1af05e983a
--- /dev/null
+++ b/deps/uv/desired-api.md
@@ -0,0 +1,171 @@
+Warning: this is not actual API but desired API.
+
+# `uv_handle_t`
+
+This is the abstract base class of all types of handles. All handles have in
+common:
+
+* When handles are initialized, the reference count to the event loop is
+ increased by one.
+
+* The user owns the `uv_handle_t` memory and is in charge of freeing it.
+
+* In order to free resources associated with a handle, one must `uv_close()`
+ and wait for the `uv_close_cb` callback. After the close callback has been
+  made, the user is allowed to free the `uv_handle_t` object.
+
+* The `uv_close_cb` is always made directly off the event loop. That is, it
+ is not called from `uv_close()`.
+
+
+
+# `uv_tcp_server_t`
+
+A TCP server class that is a subclass of `uv_handle_t`. This can be bound to
+an address and begin accepting new TCP sockets.
+
+ int uv_bind4(uv_tcp_server_t* tcp_server, struct sockaddr_in* address);
+ int uv_bind6(uv_tcp_server_t* tcp_server, struct sockaddr_in6* address);
+
+Binds the TCP server to an address. The `address` can be created with
+`uv_ip4_addr()`. Call this before `uv_listen()`.
+
+Returns zero on success, -1 on failure. Errors in order of least-seriousness:
+
+* `UV_EADDRINUSE` There is already another socket bound to the specified
+ address.
+
+* `UV_EADDRNOTAVAIL` The `address` parameter is an IP address that is not
+  available for binding on the local machine.
+
+* `UV_EINVAL` The server is already bound to an address.
+
+* `UV_EFAULT` Memory of `address` parameter is unintelligible.
+
+
+ int uv_listen(uv_tcp_server_t*, int backlog, uv_connection_cb cb);
+
+Begins listening for connections. The accept callback is level-triggered.
+
+
+ int uv_accept(uv_tcp_server_t* server,
+ uv_tcp_t* client,
+ uv_close_cb close_cb,
+ void* data);
+
+Accepts a connection. This should be called after the accept callback is
+made. The `client` parameter should be uninitialized memory; `uv_accept` is
+used instead of `uv_tcp_init` for server-side `uv_tcp_t` initialization.
+
+Return value 0 indicates success, -1 failure. Possible errors:
+
+* `UV_EAGAIN` There are no connections. Wait for the `uv_connection_cb` callback
+ to be called again.
+
+* `UV_EFAULT` The memory of either `server` or `client` is unintelligible.
+
+
+
+# `uv_stream_t`
+
+An abstract subclass of `uv_handle_t`. Streams represent something that
+reads and/or writes data. Streams can be half or full-duplex. TCP sockets
+are streams, files are streams with offsets.
+
+ int uv_read_start(uv_stream_t* stream,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb);
+
+Starts the stream reading continuously. The `alloc_cb` is used to allow the
+user to implement various means of supplying the stream with buffers to
+fill. The `read_cb` returns buffers to the user filled with data.
+
+Sometimes the buffers returned to the user do not contain data. This does
+not indicate EOF as in other systems. EOF is made via the `uv_eof_cb` which
+can be set like this `uv_set_eof_cb(stream, eof_cb);`
+
+
+ int uv_read_stop(uv_stream_t* stream);
+
+Stops reading from the stream.
+
+ int uv_write_req_init(uv_write_req_t*,
+ uv_stream_t*,
+ uv_buf_t bufs[],
+                      int bufcnt,
+ uv_close_cb close_cb,
+ void* data);
+
+Initiates a write request on a stream.
+
+ int uv_shutdown_req_init(uv_shutdown_req_t*, uv_stream_t*)
+
+Initiates a shutdown of outgoing data once the write queue drains.
+
+
+
+# `uv_tcp_t`
+
+The TCP handle class represents one endpoint of a duplex TCP stream.
+`uv_tcp_t` is a subclass of `uv_stream_t`. A TCP handle can represent a
+client side connection (one that has been used with uv_connect_req_init`)
+or a server-side connection (one that was initialized with `uv_accept`)
+
+ int uv_connect_req_init(uv_connect_req_t* req,
+ uv_tcp_t* socket,
+ struct sockaddr* addr,
+ uv_close_cb close_cb,
+ void* data);
+
+Initiates a request to open a connection.
+
+
+
+# `uv_req_t`
+
+Abstract class represents an asynchronous request. This is a subclass of `uv_handle_t`.
+
+
+# `uv_connect_req_t`
+
+Subclass of `uv_req_t`. Represents a request for a TCP connection. Operates
+on `uv_tcp_t` handles. Like other types of requests the `close_cb` indicates
+completion of the request.
+
+ int uv_connect_req_init(uv_connect_req_t* req,
+ uv_tcp_t* socket,
+ struct sockaddr* addr,
+ uv_close_cb close_cb,
+ void* data);
+
+Initializes the connection request. Returning 0 indicates success, -1 if
+there was an error. The following values can be retrieved from
+`uv_last_error` in the case of an error:
+
+* ???
+
+
+# `uv_shutdown_req_t`
+
+Subclass of `uv_req_t`. Represents an ongoing shutdown request. Once the
+write queue of the parent `uv_stream_t` is drained, the outbound data
+channel is shutdown. Once a shutdown request is initiated on a stream, the
+stream will allow no more writes.
+
+ int uv_shutdown_req_init(uv_shutdown_req_t*,
+ uv_stream_t* parent,
+ uv_close_cb close_cb,
+ void* data);
+
+Initializes the shutdown request.
+
+
+# `uv_write_req_t`
+
+ int uv_write_req_init(uv_write_req_t*,
+ uv_stream_t*,
+ uv_buf_t bufs[],
+                      int bufcnt,
+ uv_close_cb close_cb,
+ void* data);
+
+Initiates a write request on a stream.
diff --git a/deps/uv/msvs/libuv-test.vcxproj b/deps/uv/msvs/libuv-test.vcxproj
index 7d1922a398..0c360b9054 100644
--- a/deps/uv/msvs/libuv-test.vcxproj
+++ b/deps/uv/msvs/libuv-test.vcxproj
@@ -147,6 +147,7 @@
<ClCompile Include="..\test\test-delayed-accept.c" />
<ClCompile Include="..\test\test-callback-stack.c" />
<ClCompile Include="..\test\test-connection-fail.c" />
+ <ClCompile Include="..\test\test-get-currentexe.c" />
<ClCompile Include="..\test\test-fail-always.c" />
<ClCompile Include="..\test\test-loop-handles.c" />
<ClCompile Include="..\test\test-pass-always.c" />
@@ -154,6 +155,7 @@
<ClCompile Include="..\test\runner-win.c" />
<ClCompile Include="..\test\runner.c" />
<ClCompile Include="..\test\test-bind-error.c" />
+ <ClCompile Include="..\test\test-shutdown-eof.c" />
<ClCompile Include="..\test\test-tcp-writealot.c" />
<ClCompile Include="..\test\test-timer-again.c" />
<ClCompile Include="..\test\test-timer.c" />
diff --git a/deps/uv/test/benchmark-list.h b/deps/uv/test/benchmark-list.h
index d90475e98e..33ef97e260 100644
--- a/deps/uv/test/benchmark-list.h
+++ b/deps/uv/test/benchmark-list.h
@@ -21,7 +21,9 @@
BENCHMARK_DECLARE (sizes)
BENCHMARK_DECLARE (ping_pongs)
-BENCHMARK_DECLARE (pump)
+BENCHMARK_DECLARE (pump100_client)
+BENCHMARK_DECLARE (pump1_client)
+HELPER_DECLARE (pump_server)
HELPER_DECLARE (echo_server)
TASK_LIST_START
@@ -30,5 +32,9 @@ TASK_LIST_START
BENCHMARK_ENTRY (ping_pongs)
BENCHMARK_HELPER (ping_pongs, echo_server)
- BENCHMARK_ENTRY (pump)
+ BENCHMARK_ENTRY (pump100_client)
+ BENCHMARK_HELPER (pump100_client, pump_server)
+
+ BENCHMARK_ENTRY (pump1_client)
+ BENCHMARK_HELPER (pump1_client, pump_server)
TASK_LIST_END
diff --git a/deps/uv/test/benchmark-ping-pongs.c b/deps/uv/test/benchmark-ping-pongs.c
index 1bda337779..9fdd560821 100644
--- a/deps/uv/test/benchmark-ping-pongs.c
+++ b/deps/uv/test/benchmark-ping-pongs.c
@@ -33,7 +33,7 @@
typedef struct {
int pongs;
int state;
- uv_handle_t handle;
+ uv_tcp_t tcp;
uv_req_t connect_req;
uv_req_t shutdown_req;
} pinger_t;
@@ -47,12 +47,12 @@ typedef struct buf_s {
static char PING[] = "PING\n";
static buf_t* buf_freelist = NULL;
-
+static int pinger_shutdown_cb_called;
static int completed_pingers = 0;
static int64_t start_time;
-static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size) {
+static uv_buf_t buf_alloc(uv_tcp_t* tcp, size_t size) {
buf_t* ab;
ab = buf_freelist;
@@ -107,7 +107,7 @@ static void pinger_write_ping(pinger_t* pinger) {
buf.len = strlen(PING);
req = (uv_req_t*)malloc(sizeof(*req));
- uv_req_init(req, &pinger->handle, pinger_write_cb);
+ uv_req_init(req, (uv_handle_t*)(&pinger->tcp), pinger_write_cb);
if (uv_write(req, &buf, 1)) {
FATAL("uv_write failed");
@@ -117,14 +117,21 @@ static void pinger_write_ping(pinger_t* pinger) {
static void pinger_shutdown_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
+ pinger_shutdown_cb_called++;
+
+ /*
+ * The close callback has not been triggered yet. We must wait for EOF
+ * until we close the connection.
+ */
+ ASSERT(completed_pingers == 0);
}
-static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
unsigned int i;
pinger_t* pinger;
- pinger = (pinger_t*)handle->data;
+ pinger = (pinger_t*)tcp->data;
if (nread < 0) {
ASSERT(uv_last_error().code == UV_EOF);
@@ -133,6 +140,9 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
buf_free(buf);
}
+ ASSERT(pinger_shutdown_cb_called == 1);
+ uv_close((uv_handle_t*)tcp);
+
return;
}
@@ -143,10 +153,9 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
if (pinger->state == 0) {
pinger->pongs++;
if (uv_now() - start_time > TIME) {
- uv_req_init(&pinger->shutdown_req, handle, pinger_shutdown_cb);
+ uv_req_init(&pinger->shutdown_req, (uv_handle_t*)tcp, pinger_shutdown_cb);
uv_shutdown(&pinger->shutdown_req);
break;
- return;
} else {
pinger_write_ping(pinger);
}
@@ -164,7 +173,7 @@ static void pinger_connect_cb(uv_req_t *req, int status) {
pinger_write_ping(pinger);
- if (uv_read_start(req->handle, pinger_read_cb)) {
+ if (uv_read_start((uv_tcp_t*)(req->handle), buf_alloc, pinger_read_cb)) {
FATAL("uv_read_start failed");
}
}
@@ -181,21 +190,22 @@ static void pinger_new() {
pinger->pongs = 0;
/* Try to connec to the server and do NUM_PINGS ping-pongs. */
- r = uv_tcp_init(&pinger->handle, pinger_close_cb, (void*)pinger);
+ r = uv_tcp_init(&pinger->tcp, pinger_close_cb, (void*)pinger);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */
- uv_req_init(&pinger->connect_req, &pinger->handle, pinger_connect_cb);
+ uv_req_init(&pinger->connect_req, (uv_handle_t*)&pinger->tcp,
+ pinger_connect_cb);
- uv_bind(&pinger->handle, (struct sockaddr*)&client_addr);
- r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
+ uv_bind(&pinger->tcp, client_addr);
+ r = uv_connect(&pinger->connect_req, server_addr);
ASSERT(!r);
}
BENCHMARK_IMPL(ping_pongs) {
- uv_init(buf_alloc);
+ uv_init();
start_time = uv_now();
pinger_new();
diff --git a/deps/uv/test/benchmark-pump.c b/deps/uv/test/benchmark-pump.c
index e9b3031ca0..960a68139b 100644
--- a/deps/uv/test/benchmark-pump.c
+++ b/deps/uv/test/benchmark-pump.c
@@ -26,7 +26,7 @@
#include <stdio.h>
-#define TARGET_CONNECTIONS 100
+static int TARGET_CONNECTIONS;
#define WRITE_BUFFER_SIZE 8192
#define MAX_SIMULTANEOUS_CONNECTS 100
@@ -35,22 +35,24 @@
#define STATS_COUNT 5
-static void do_write(uv_handle_t* handle);
+static void do_write(uv_tcp_t*);
static void maybe_connect_some();
static uv_req_t* req_alloc();
static void req_free(uv_req_t* uv_req);
-static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size);
+static uv_buf_t buf_alloc(uv_tcp_t*, size_t size);
static void buf_free(uv_buf_t uv_buf_t);
+static uv_tcp_t server;
static struct sockaddr_in listen_addr;
static struct sockaddr_in connect_addr;
static int64_t start_time;
static int max_connect_socket = 0;
+static int max_read_sockets = 0;
static int read_sockets = 0;
static int write_sockets = 0;
@@ -63,10 +65,12 @@ static int stats_left = 0;
static char write_buffer[WRITE_BUFFER_SIZE];
-static uv_handle_t read_handles[TARGET_CONNECTIONS];
-static uv_handle_t write_handles[TARGET_CONNECTIONS];
+/* Make this as large as you need. */
+#define MAX_WRITE_HANDLES 1000
-static uv_handle_t timer_handle;
+static uv_tcp_t write_handles[MAX_WRITE_HANDLES];
+
+static uv_timer_t timer_handle;
static double gbit(int64_t bytes, int64_t passed_ms) {
@@ -79,9 +83,8 @@ static void show_stats(uv_handle_t *handle, int status) {
int64_t diff;
#if PRINT_STATS
- LOGF("connections: %d, read: %.1f gbit/s, write: %.1f gbit/s\n",
- read_sockets,
- gbit(nrecv, STATS_INTERVAL),
+ LOGF("connections: %d, write: %.1f gbit/s\n",
+ write_sockets,
gbit(nsent, STATS_INTERVAL));
#endif
@@ -91,8 +94,8 @@ static void show_stats(uv_handle_t *handle, int status) {
uv_update_time();
diff = uv_now() - start_time;
- LOGF("pump_%d: %.1f gbit/s\n", read_sockets,
- gbit(nrecv_total, diff));
+ LOGF("pump%d_client: %.1f gbit/s\n", write_sockets,
+ gbit(nsent_total, diff));
exit(0);
}
@@ -103,8 +106,38 @@ static void show_stats(uv_handle_t *handle, int status) {
}
-void close_cb(uv_handle_t* handle, int status) {
+static void read_show_stats() {
+ int64_t diff;
+
+ uv_update_time();
+ diff = uv_now() - start_time;
+
+ LOGF("pump%d_server: %.1f gbit/s\n", max_read_sockets,
+ gbit(nrecv_total, diff));
+}
+
+
+
+void write_sockets_close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
+ /* If any client closes, the process is done. */
+ exit(0);
+}
+
+
+void read_sockets_close_cb(uv_handle_t* handle, int status) {
+ ASSERT(status == 0);
+
+ free(handle);
+ read_sockets--;
+
+ /* If it's past the first second and everyone has closed their connection
+ * Then print stats.
+ */
+ if (uv_now() - start_time > 1000 && read_sockets == 0) {
+ read_show_stats();
+ uv_close((uv_handle_t*)&server);
+ }
}
@@ -114,15 +147,27 @@ static void start_stats_collection() {
/* Show-stats timer */
stats_left = STATS_COUNT;
- r = uv_timer_init(&timer_handle, close_cb, NULL);
+ r = uv_timer_init(&timer_handle, NULL, NULL);
ASSERT(r == 0);
r = uv_timer_start(&timer_handle, show_stats, STATS_INTERVAL, STATS_INTERVAL);
ASSERT(r == 0);
+
+ uv_update_time();
+ start_time = uv_now();
}
-static void read_cb(uv_handle_t* handle, int bytes, uv_buf_t buf) {
- ASSERT(bytes >= 0);
+static void read_cb(uv_tcp_t* tcp, int bytes, uv_buf_t buf) {
+ if (nrecv_total == 0) {
+ ASSERT(start_time == 0);
+ uv_update_time();
+ start_time = uv_now();
+ }
+
+ if (bytes < 0) {
+ uv_close((uv_handle_t*)tcp);
+ return;
+ }
buf_free(buf);
@@ -141,11 +186,11 @@ static void write_cb(uv_req_t *req, int status) {
nsent += sizeof write_buffer;
nsent_total += sizeof write_buffer;
- do_write(req->handle);
+ do_write((uv_tcp_t*)req->handle);
}
-static void do_write(uv_handle_t* handle) {
+static void do_write(uv_tcp_t* tcp) {
uv_req_t* req;
uv_buf_t buf;
int r;
@@ -153,31 +198,19 @@ static void do_write(uv_handle_t* handle) {
buf.base = (char*) &write_buffer;
buf.len = sizeof write_buffer;
- while (handle->write_queue_size == 0) {
+ while (tcp->write_queue_size == 0) {
req = req_alloc();
- uv_req_init(req, handle, write_cb);
+ uv_req_init(req, (uv_handle_t*)tcp, write_cb);
r = uv_write(req, &buf, 1);
ASSERT(r == 0);
}
}
-static void maybe_start_writing() {
- int i;
-
- if (read_sockets == TARGET_CONNECTIONS &&
- write_sockets == TARGET_CONNECTIONS) {
- start_stats_collection();
-
- /* Yay! start writing */
- for (i = 0; i < write_sockets; i++) {
- do_write(&write_handles[i]);
- }
- }
-}
-
static void connect_cb(uv_req_t* req, int status) {
+ int i;
+
if (status) LOG(uv_strerror(uv_last_error()));
ASSERT(status == 0);
@@ -185,78 +218,55 @@ static void connect_cb(uv_req_t* req, int status) {
req_free(req);
maybe_connect_some();
- maybe_start_writing();
-}
-
-
-static void do_connect(uv_handle_t* handle, struct sockaddr* addr) {
- uv_req_t* req;
- int r;
-
- r = uv_tcp_init(handle, close_cb, NULL);
- ASSERT(r == 0);
-
- req = req_alloc();
- uv_req_init(req, handle, connect_cb);
- r = uv_connect(req, addr);
- ASSERT(r == 0);
-}
+ if (write_sockets == TARGET_CONNECTIONS) {
+ start_stats_collection();
-static void maybe_connect_some() {
- while (max_connect_socket < TARGET_CONNECTIONS &&
- max_connect_socket < write_sockets + MAX_SIMULTANEOUS_CONNECTS) {
- do_connect(&write_handles[max_connect_socket++],
- (struct sockaddr*) &connect_addr);
+ /* Yay! start writing */
+ for (i = 0; i < write_sockets; i++) {
+ do_write(&write_handles[i]);
+ }
}
}
-static void accept_cb(uv_handle_t* server) {
- uv_handle_t* handle;
+static void maybe_connect_some() {
+ uv_req_t* req;
+ uv_tcp_t* tcp;
int r;
- ASSERT(read_sockets < TARGET_CONNECTIONS);
- handle = &read_handles[read_sockets];
-
- r = uv_accept(server, handle, close_cb, NULL);
- ASSERT(r == 0);
-
- r = uv_read_start(handle, read_cb);
- ASSERT(r == 0);
+ while (max_connect_socket < TARGET_CONNECTIONS &&
+ max_connect_socket < write_sockets + MAX_SIMULTANEOUS_CONNECTS) {
+ tcp = &write_handles[max_connect_socket++];
- read_sockets++;
+ r = uv_tcp_init(tcp, write_sockets_close_cb, NULL);
+ ASSERT(r == 0);
- maybe_start_writing();
+ req = req_alloc();
+ uv_req_init(req, (uv_handle_t*)tcp, connect_cb);
+ r = uv_connect(req, connect_addr);
+ ASSERT(r == 0);
+ }
}
-BENCHMARK_IMPL(pump) {
- uv_handle_t server;
+static void connection_cb(uv_tcp_t* s, int status) {
+ uv_tcp_t* tcp;
int r;
- uv_init(buf_alloc);
+ ASSERT(&server == s);
+ ASSERT(status == 0);
- listen_addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
- connect_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
+ tcp = malloc(sizeof(uv_tcp_t));
- /* Server */
- r = uv_tcp_init(&server, close_cb, NULL);
- ASSERT(r == 0);
- r = uv_bind(&server, (struct sockaddr*) &listen_addr);
- ASSERT(r == 0);
- r = uv_listen(&server, TARGET_CONNECTIONS, accept_cb);
+ r = uv_accept(s, tcp, read_sockets_close_cb, NULL);
ASSERT(r == 0);
- uv_update_time();
- start_time = uv_now();
-
- /* Start making connections */
- maybe_connect_some();
-
- uv_run();
+ r = uv_read_start(tcp, buf_alloc, read_cb);
+ ASSERT(r == 0);
- return 0;
+ read_sockets++;
+ max_read_sockets++;
}
@@ -308,7 +318,7 @@ typedef struct buf_list_s {
static buf_list_t* buf_freelist = NULL;
-static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size) {
+static uv_buf_t buf_alloc(uv_tcp_t* tcp, size_t size) {
buf_list_t* buf;
buf = buf_freelist;
@@ -331,3 +341,50 @@ static void buf_free(uv_buf_t uv_buf_t) {
buf->next = buf_freelist;
buf_freelist = buf;
}
+
+
+HELPER_IMPL(pump_server) {
+ int r;
+
+ uv_init();
+ listen_addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
+
+ /* Server */
+ r = uv_tcp_init(&server, NULL, NULL);
+ ASSERT(r == 0);
+ r = uv_bind(&server, listen_addr);
+ ASSERT(r == 0);
+ r = uv_listen(&server, MAX_WRITE_HANDLES, connection_cb);
+ ASSERT(r == 0);
+
+ uv_run();
+
+ return 0;
+}
+
+
+void pump(int n) {
+ ASSERT(n <= MAX_WRITE_HANDLES);
+ TARGET_CONNECTIONS = n;
+
+ uv_init();
+
+ connect_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
+
+ /* Start making connections */
+ maybe_connect_some();
+
+ uv_run();
+}
+
+
+BENCHMARK_IMPL(pump100_client) {
+ pump(100);
+ return 0;
+}
+
+
+BENCHMARK_IMPL(pump1_client) {
+ pump(1);
+ return 0;
+}
diff --git a/deps/uv/test/benchmark-sizes.c b/deps/uv/test/benchmark-sizes.c
index cc8f7d4373..a5f573feec 100644
--- a/deps/uv/test/benchmark-sizes.c
+++ b/deps/uv/test/benchmark-sizes.c
@@ -24,7 +24,12 @@
BENCHMARK_IMPL(sizes) {
- LOGF("uv_handle_t: %lu bytes\n", sizeof(uv_handle_t));
LOGF("uv_req_t: %lu bytes\n", sizeof(uv_req_t));
+ LOGF("uv_tcp_t: %lu bytes\n", sizeof(uv_tcp_t));
+ LOGF("uv_prepare_t: %lu bytes\n", sizeof(uv_prepare_t));
+ LOGF("uv_check_t: %lu bytes\n", sizeof(uv_check_t));
+ LOGF("uv_idle_t: %lu bytes\n", sizeof(uv_idle_t));
+ LOGF("uv_async_t: %lu bytes\n", sizeof(uv_async_t));
+ LOGF("uv_timer_t: %lu bytes\n", sizeof(uv_timer_t));
return 0;
}
diff --git a/deps/uv/test/echo-server.c b/deps/uv/test/echo-server.c
index 0fcc935f36..e8fd61f4f1 100644
--- a/deps/uv/test/echo-server.c
+++ b/deps/uv/test/echo-server.c
@@ -31,13 +31,14 @@ typedef struct {
} write_req_t;
-static uv_handle_t server;
+static int server_closed;
+static uv_tcp_t server;
static void after_write(uv_req_t* req, int status);
-static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf);
+static void after_read(uv_tcp_t*, int nread, uv_buf_t buf);
static void on_close(uv_handle_t* peer, int status);
-static void on_accept(uv_handle_t* handle);
+static void on_connection(uv_tcp_t*, int status);
static void after_write(uv_req_t* req, int status) {
@@ -58,11 +59,13 @@ static void after_write(uv_req_t* req, int status) {
static void after_shutdown(uv_req_t* req, int status) {
+ uv_close(req->handle);
free(req);
}
-static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
+ int i;
write_req_t *wr;
uv_req_t* req;
@@ -75,7 +78,7 @@ static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
}
req = (uv_req_t*) malloc(sizeof *req);
- uv_req_init(req, handle, after_shutdown);
+ uv_req_init(req, (uv_handle_t*)handle, after_shutdown);
uv_shutdown(req);
return;
@@ -87,9 +90,19 @@ static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
return;
}
+ /* Scan for the letter Q which signals that we should quit. */
+ if (!server_closed) {
+ for (i = 0; i < nread; i++) {
+ if (buf.base[i] == 'Q') {
+ uv_close((uv_handle_t*)&server);
+ server_closed = 1;
+ }
+ }
+ }
+
wr = (write_req_t*) malloc(sizeof *wr);
- uv_req_init(&wr->req, handle, after_write);
+ uv_req_init(&wr->req, (uv_handle_t*)handle, after_write);
wr->buf.base = buf.base;
wr->buf.len = nread;
if (uv_write(&wr->req, &wr->buf, 1)) {
@@ -105,19 +118,33 @@ static void on_close(uv_handle_t* peer, int status) {
}
-static void on_accept(uv_handle_t* server) {
- uv_handle_t* handle = (uv_handle_t*) malloc(sizeof *handle);
+static uv_buf_t echo_alloc(uv_tcp_t* handle, size_t suggested_size) {
+ uv_buf_t buf;
+ buf.base = (char*) malloc(suggested_size);
+ buf.len = suggested_size;
+ return buf;
+}
- if (uv_accept(server, handle, on_close, NULL)) {
- FATAL("uv_accept failed");
- }
- uv_read_start(handle, after_read);
+static void on_connection(uv_tcp_t* server, int status) {
+ uv_tcp_t* handle;
+ int r;
+
+ ASSERT(status == 0);
+
+ handle = (uv_tcp_t*) malloc(sizeof *handle);
+ ASSERT(handle != NULL);
+
+ r = uv_accept(server, handle, on_close, NULL);
+ ASSERT(r == 0);
+
+ r = uv_read_start(handle, echo_alloc, after_read);
+ ASSERT(r == 0);
}
static void on_server_close(uv_handle_t* handle, int status) {
- ASSERT(handle == &server);
+ ASSERT(handle == (uv_handle_t*)&server);
ASSERT(status == 0);
}
@@ -133,14 +160,14 @@ static int echo_start(int port) {
return 1;
}
- r = uv_bind(&server, (struct sockaddr*) &addr);
+ r = uv_bind(&server, addr);
if (r) {
/* TODO: Error codes */
fprintf(stderr, "Bind error\n");
return 1;
}
- r = uv_listen(&server, 128, on_accept);
+ r = uv_listen(&server, 128, on_connection);
if (r) {
/* TODO: Error codes */
fprintf(stderr, "Listen error\n");
@@ -151,25 +178,11 @@ static int echo_start(int port) {
}
-static int echo_stop() {
- return uv_close(&server);
-}
-
-
-static uv_buf_t echo_alloc(uv_handle_t* handle, size_t suggested_size) {
- uv_buf_t buf;
- buf.base = (char*) malloc(suggested_size);
- buf.len = suggested_size;
- return buf;
-}
-
-
HELPER_IMPL(echo_server) {
- uv_init(echo_alloc);
+ uv_init();
if (echo_start(TEST_PORT))
return 1;
- fprintf(stderr, "Listening!\n");
uv_run();
return 0;
}
diff --git a/deps/uv/test/runner-unix.c b/deps/uv/test/runner-unix.c
index 7af10c45e2..139b671942 100644
--- a/deps/uv/test/runner-unix.c
+++ b/deps/uv/test/runner-unix.c
@@ -37,10 +37,6 @@
#include <sys/select.h>
#include <pthread.h>
-#define PATHMAX 1024
-static char executable_path[PATHMAX] = { '\0' };
-
-
#ifdef __APPLE__
#include <mach-o/dyld.h> /* _NSGetExecutablePath */
@@ -84,7 +80,7 @@ int process_start(char* name, process_info_t* p) {
p->terminated = 0;
p->status = 0;
- pid_t pid = vfork();
+ pid_t pid = fork();
if (pid < 0) {
perror("vfork");
diff --git a/deps/uv/test/runner-win.c b/deps/uv/test/runner-win.c
index a91b7d5f5e..09458a6b5c 100644
--- a/deps/uv/test/runner-win.c
+++ b/deps/uv/test/runner-win.c
@@ -47,6 +47,8 @@ void platform_init(int argc, char **argv) {
/* Disable stdio output buffering. */
setvbuf(stdout, NULL, _IONBF, 0);
setvbuf(stderr, NULL, _IONBF, 0);
+
+ strcpy(executable_path, argv[0]);
}
diff --git a/deps/uv/test/runner.c b/deps/uv/test/runner.c
index f7caff7987..778bfb6573 100644
--- a/deps/uv/test/runner.c
+++ b/deps/uv/test/runner.c
@@ -24,6 +24,7 @@
#include "runner.h"
#include "task.h"
+char executable_path[PATHMAX] = { '\0' };
/* Start a specific process declared by TEST_ENTRY or TEST_HELPER. */
/* Returns the exit code of the specific process. */
@@ -75,6 +76,9 @@ int run_task(task_entry_t *test, int timeout, int benchmark_output) {
}
}
+ /* Wait a little bit to allow servers to start. Racy. */
+ uv_sleep(50);
+
/* Start the main test process. */
if (process_start(test->process_name, &processes[process_count]) == -1) {
snprintf((char*)&errmsg, sizeof(errmsg), "process `%s` failed to start.",
@@ -117,8 +121,7 @@ finalize:
/* Show error and output from processes if the test failed. */
if (!success) {
- LOG("\n=============================================================\n");
- LOGF("`%s` failed: %s\n", test->task_name, errmsg);
+ LOGF("\n`%s` failed: %s\n", test->task_name, errmsg);
for (i = 0; i < process_count; i++) {
switch (process_output_size(&processes[i])) {
@@ -138,7 +141,7 @@ finalize:
break;
}
}
- LOG("\n");
+ LOG("=============================================================\n");
/* In benchmark mode show concise output from the main process. */
} else if (benchmark_output) {
diff --git a/deps/uv/test/runner.h b/deps/uv/test/runner.h
index a6af951f2a..11d498020a 100644
--- a/deps/uv/test/runner.h
+++ b/deps/uv/test/runner.h
@@ -79,6 +79,8 @@ int run_task(task_entry_t *test, int timeout, int benchmark_output);
#define TEST_HELPER HELPER_ENTRY
#define BENCHMARK_HELPER HELPER_ENTRY
+#define PATHMAX 1024
+extern char executable_path[PATHMAX];
/*
* Include platform-dependent definitions
diff --git a/deps/uv/test/test-async.c b/deps/uv/test/test-async.c
index 7bedcf4a95..810de292dd 100644
--- a/deps/uv/test/test-async.c
+++ b/deps/uv/test/test-async.c
@@ -25,9 +25,9 @@
#include <stdlib.h>
-static uv_handle_t prepare_handle;
+static uv_prepare_t prepare_handle;
-static uv_handle_t async1_handle;
+static uv_async_t async1_handle;
/* static uv_handle_t async2_handle; */
static int prepare_cb_called = 0;
@@ -120,15 +120,8 @@ static void close_cb(uv_handle_t* handle, int status) {
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t buf = {0, 0};
- FATAL("alloc should not be called");
- return buf;
-}
-
-
static void async1_cb(uv_handle_t* handle, int status) {
- ASSERT(handle == &async1_handle);
+ ASSERT(handle == (uv_handle_t*)&async1_handle);
ASSERT(status == 0);
async1_cb_called++;
@@ -159,7 +152,7 @@ static void async2_cb(uv_handle_t* handle, int status) {
static void prepare_cb(uv_handle_t* handle, int status) {
int r;
- ASSERT(handle == &prepare_handle);
+ ASSERT(handle == (uv_handle_t*)&prepare_handle);
ASSERT(status == 0);
switch (prepare_cb_called) {
@@ -196,7 +189,7 @@ static void prepare_cb(uv_handle_t* handle, int status) {
TEST_IMPL(async) {
int r;
- uv_init(alloc_cb);
+ uv_init();
r = uv_prepare_init(&prepare_handle, close_cb, NULL);
ASSERT(r == 0);
diff --git a/deps/uv/test/test-bind-error.c b/deps/uv/test/test-bind-error.c
index ca9ccc5885..a195fb2cad 100644
--- a/deps/uv/test/test-bind-error.c
+++ b/deps/uv/test/test-bind-error.c
@@ -36,28 +36,21 @@ static void close_cb(uv_handle_t* handle, int status) {
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t buf = {0, 0};
- FATAL("alloc should not be called");
- return buf;
-}
-
-
TEST_IMPL(bind_error_addrinuse) {
struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
- uv_handle_t server1, server2;
+ uv_tcp_t server1, server2;
int r;
- uv_init(alloc_cb);
+ uv_init();
r = uv_tcp_init(&server1, close_cb, NULL);
ASSERT(r == 0);
- r = uv_bind(&server1, (struct sockaddr*) &addr);
+ r = uv_bind(&server1, addr);
ASSERT(r == 0);
r = uv_tcp_init(&server2, close_cb, NULL);
ASSERT(r == 0);
- r = uv_bind(&server2, (struct sockaddr*) &addr);
+ r = uv_bind(&server2, addr);
ASSERT(r == 0);
r = uv_listen(&server1, 128, NULL);
@@ -67,8 +60,8 @@ TEST_IMPL(bind_error_addrinuse) {
ASSERT(uv_last_error().code == UV_EADDRINUSE);
- uv_close(&server1);
- uv_close(&server2);
+ uv_close((uv_handle_t*)&server1);
+ uv_close((uv_handle_t*)&server2);
uv_run();
@@ -80,21 +73,21 @@ TEST_IMPL(bind_error_addrinuse) {
TEST_IMPL(bind_error_addrnotavail_1) {
struct sockaddr_in addr = uv_ip4_addr("127.255.255.255", TEST_PORT);
- uv_handle_t server;
+ uv_tcp_t server;
int r;
- uv_init(alloc_cb);
+ uv_init();
r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
- r = uv_bind(&server, (struct sockaddr*) &addr);
+ r = uv_bind(&server, addr);
/* It seems that Linux is broken here - bind succeeds. */
if (r == -1) {
ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
}
- uv_close(&server);
+ uv_close((uv_handle_t*)&server);
uv_run();
@@ -106,18 +99,18 @@ TEST_IMPL(bind_error_addrnotavail_1) {
TEST_IMPL(bind_error_addrnotavail_2) {
struct sockaddr_in addr = uv_ip4_addr("4.4.4.4", TEST_PORT);
- uv_handle_t server;
+ uv_tcp_t server;
int r;
- uv_init(alloc_cb);
+ uv_init();
r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
- r = uv_bind(&server, (struct sockaddr*) &addr);
+ r = uv_bind(&server, addr);
ASSERT(r == -1);
ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
- uv_close(&server);
+ uv_close((uv_handle_t*)&server);
uv_run();
@@ -129,19 +122,22 @@ TEST_IMPL(bind_error_addrnotavail_2) {
TEST_IMPL(bind_error_fault) {
char garbage[] = "blah blah blah blah blah blah blah blah blah blah blah blah";
- uv_handle_t server;
+ struct sockaddr_in* garbage_addr;
+ uv_tcp_t server;
int r;
- uv_init(alloc_cb);
+ garbage_addr = (struct sockaddr_in*) &garbage;
+
+ uv_init();
r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
- r = uv_bind(&server, (struct sockaddr*) &garbage);
+ r = uv_bind(&server, *garbage_addr);
ASSERT(r == -1);
ASSERT(uv_last_error().code == UV_EFAULT);
- uv_close(&server);
+ uv_close((uv_handle_t*)&server);
uv_run();
@@ -155,21 +151,21 @@ TEST_IMPL(bind_error_fault) {
TEST_IMPL(bind_error_inval) {
struct sockaddr_in addr1 = uv_ip4_addr("0.0.0.0", TEST_PORT);
struct sockaddr_in addr2 = uv_ip4_addr("0.0.0.0", TEST_PORT_2);
- uv_handle_t server;
+ uv_tcp_t server;
int r;
- uv_init(alloc_cb);
+ uv_init();
r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
- r = uv_bind(&server, (struct sockaddr*) &addr1);
+ r = uv_bind(&server, addr1);
ASSERT(r == 0);
- r = uv_bind(&server, (struct sockaddr*) &addr2);
+ r = uv_bind(&server, addr2);
ASSERT(r == -1);
ASSERT(uv_last_error().code == UV_EINVAL);
- uv_close(&server);
+ uv_close((uv_handle_t*)&server);
uv_run();
diff --git a/deps/uv/test/test-callback-stack.c b/deps/uv/test/test-callback-stack.c
index d136c9a764..b1755b656f 100644
--- a/deps/uv/test/test-callback-stack.c
+++ b/deps/uv/test/test-callback-stack.c
@@ -30,7 +30,8 @@
static const char MESSAGE[] = "Failure is for the weak. Everyone dies alone.";
-static uv_handle_t client, timer;
+static uv_tcp_t client;
+static uv_timer_t timer;
static uv_req_t connect_req, write_req, shutdown_req;
static int nested = 0;
@@ -42,6 +43,15 @@ static int bytes_received = 0;
static int shutdown_cb_called = 0;
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
+ uv_buf_t buf;
+ buf.len = size;
+ buf.base = (char*) malloc(size);
+ ASSERT(buf.base);
+ return buf;
+}
+
+
static void close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
ASSERT(nested == 0 && "close_cb must be called from a fresh stack");
@@ -58,7 +68,7 @@ static void shutdown_cb(uv_req_t* req, int status) {
}
-static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
ASSERT(nested == 0 && "read_cb must be called from a fresh stack");
printf("Read. nread == %d\n", nread);
@@ -72,7 +82,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
ASSERT(uv_last_error().code == UV_EOF);
nested++;
- if (uv_close(handle)) {
+ if (uv_close((uv_handle_t*)tcp)) {
FATAL("uv_close failed");
}
nested--;
@@ -88,7 +98,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
/* from a fresh stack. */
if (bytes_received == sizeof MESSAGE) {
nested++;
- uv_req_init(&shutdown_req, handle, shutdown_cb);
+ uv_req_init(&shutdown_req, (uv_handle_t*)tcp, shutdown_cb);
puts("Shutdown");
@@ -103,14 +113,14 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
static void timer_cb(uv_handle_t* handle, int status) {
int r;
- ASSERT(handle == &timer);
+ ASSERT(handle == (uv_handle_t*)&timer);
ASSERT(status == 0);
ASSERT(nested == 0 && "timer_cb must be called from a fresh stack");
puts("Timeout complete. Now read data...");
nested++;
- if (uv_read_start(&client, read_cb)) {
+ if (uv_read_start(&client, alloc_cb, read_cb)) {
FATAL("uv_read_start failed");
}
nested--;
@@ -170,19 +180,10 @@ static void connect_cb(uv_req_t* req, int status) {
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t buf;
- buf.len = size;
- buf.base = (char*) malloc(size);
- ASSERT(buf.base);
- return buf;
-}
-
-
TEST_IMPL(callback_stack) {
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
- uv_init(alloc_cb);
+ uv_init();
if (uv_tcp_init(&client, &close_cb, NULL)) {
FATAL("uv_tcp_init failed");
@@ -191,8 +192,8 @@ TEST_IMPL(callback_stack) {
puts("Connecting...");
nested++;
- uv_req_init(&connect_req, &client, connect_cb);
- if (uv_connect(&connect_req, (struct sockaddr*) &addr)) {
+ uv_req_init(&connect_req, (uv_handle_t*)&client, connect_cb);
+ if (uv_connect(&connect_req, addr)) {
FATAL("uv_connect failed");
}
nested--;
diff --git a/deps/uv/test/test-connection-fail.c b/deps/uv/test/test-connection-fail.c
index e28c7c36e6..df7cdf5c4e 100644
--- a/deps/uv/test/test-connection-fail.c
+++ b/deps/uv/test/test-connection-fail.c
@@ -26,11 +26,15 @@
#include <stdio.h>
-static uv_handle_t handle;
+static uv_tcp_t tcp;
static uv_req_t req;
static int connect_cb_calls;
static int close_cb_calls;
+static uv_timer_t timer;
+static int timer_close_cb_calls;
+static int timer_cb_calls;
+
static void on_close(uv_handle_t* handle, int status) {
ASSERT(status == 0);
@@ -38,48 +42,111 @@ static void on_close(uv_handle_t* handle, int status) {
}
-static void on_connect(uv_req_t *req, int status) {
+static void timer_close_cb(uv_handle_t* handle, int status) {
+ ASSERT(status == 0);
+ timer_close_cb_calls++;
+}
+
+
+static void timer_cb(uv_handle_t* handle, int status) {
+ ASSERT(status == 0);
+ timer_cb_calls++;
+
+ /*
+ * These are the important asserts. The connection callback has been made,
+ * but libuv hasn't automatically closed the socket. The user must
+ * uv_close the handle manually.
+ */
+ ASSERT(close_cb_calls == 0);
+ ASSERT(connect_cb_calls == 1);
+
+ /* Close the tcp handle. */
+ uv_close((uv_handle_t*)&tcp);
+
+ /* Close the timer. */
+ uv_close(handle);
+}
+
+
+static void on_connect_with_close(uv_req_t *req, int status) {
ASSERT(status == -1);
ASSERT(uv_last_error().code == UV_ECONNREFUSED);
connect_cb_calls++;
+
+ ASSERT(close_cb_calls == 0);
uv_close(req->handle);
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t buf = {0, 0};
- FATAL("alloc should not be called");
- return buf;
+static void on_connect_without_close(uv_req_t *req, int status) {
+ ASSERT(status == -1);
+ ASSERT(uv_last_error().code == UV_ECONNREFUSED);
+ connect_cb_calls++;
+
+ uv_timer_start(&timer, timer_cb, 100, 0);
+
+ ASSERT(close_cb_calls == 0);
}
-TEST_IMPL(connection_fail) {
+void connection_fail(uv_connect_cb connect_cb) {
struct sockaddr_in client_addr, server_addr;
int r;
- uv_init(alloc_cb);
-
client_addr = uv_ip4_addr("0.0.0.0", 0);
/* There should be no servers listening on this port. */
server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
/* Try to connec to the server and do NUM_PINGS ping-pongs. */
- r = uv_tcp_init(&handle, on_close, NULL);
+ r = uv_tcp_init(&tcp, on_close, NULL);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */
- uv_req_init(&req, &handle, on_connect);
+ uv_req_init(&req, (uv_handle_t*)&tcp, connect_cb);
- uv_bind(&handle, (struct sockaddr*)&client_addr);
- r = uv_connect(&req, (struct sockaddr*)&server_addr);
+ uv_bind(&tcp, client_addr);
+ r = uv_connect(&req, server_addr);
ASSERT(!r);
uv_run();
ASSERT(connect_cb_calls == 1);
ASSERT(close_cb_calls == 1);
+}
+
+
+/*
+ * This test attempts to connect to a port where no server is running. We
+ * expect an error.
+ */
+TEST_IMPL(connection_fail) {
+ uv_init();
+
+ connection_fail(on_connect_with_close);
+
+ ASSERT(timer_close_cb_calls == 0);
+ ASSERT(timer_cb_calls == 0);
+
+ return 0;
+}
+
+
+/*
+ * This test is the same as the first except it checks that the close
+ * callback of the tcp handle hasn't been made after the failed connection
+ * attempt.
+ */
+TEST_IMPL(connection_fail_doesnt_auto_close) {
+ uv_init();
+
+ uv_timer_init(&timer, timer_close_cb, NULL);
+
+ connection_fail(on_connect_without_close);
+
+ ASSERT(timer_close_cb_calls == 1);
+ ASSERT(timer_cb_calls == 1);
return 0;
}
diff --git a/deps/uv/test/test-delayed-accept.c b/deps/uv/test/test-delayed-accept.c
index e372b955c4..064fe32716 100644
--- a/deps/uv/test/test-delayed-accept.c
+++ b/deps/uv/test/test-delayed-accept.c
@@ -27,12 +27,20 @@
static char BUFFER[1024];
-static int accept_cb_called = 0;
+static int connection_cb_called = 0;
static int do_accept_called = 0;
static int close_cb_called = 0;
static int connect_cb_called = 0;
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
+ uv_buf_t buf;
+ buf.base = (char*)malloc(size);
+ buf.len = size;
+ return buf;
+}
+
+
static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL);
ASSERT(status == 0);
@@ -44,27 +52,27 @@ static void close_cb(uv_handle_t* handle, int status) {
static void do_accept(uv_handle_t* timer_handle, int status) {
- uv_handle_t* server;
- uv_handle_t* accepted_handle = (uv_handle_t*)malloc(sizeof *accepted_handle);
+ uv_tcp_t* server;
+ uv_tcp_t* accepted_handle = (uv_tcp_t*)malloc(sizeof *accepted_handle);
int r;
ASSERT(timer_handle != NULL);
ASSERT(status == 0);
ASSERT(accepted_handle != NULL);
- server = (uv_handle_t*)timer_handle->data;
+ server = (uv_tcp_t*)timer_handle->data;
r = uv_accept(server, accepted_handle, close_cb, NULL);
ASSERT(r == 0);
do_accept_called++;
/* Immediately close the accepted handle. */
- r = uv_close(accepted_handle);
+ r = uv_close((uv_handle_t*)accepted_handle);
ASSERT(r == 0);
/* After accepting the two clients close the server handle */
if (do_accept_called == 2) {
- r = uv_close(server);
+ r = uv_close((uv_handle_t*)server);
ASSERT(r == 0);
}
@@ -74,26 +82,28 @@ static void do_accept(uv_handle_t* timer_handle, int status) {
}
-static void accept_cb(uv_handle_t* handle) {
+static void connection_cb(uv_tcp_t* tcp, int status) {
int r;
- uv_handle_t* timer_handle;
+ uv_timer_t* timer_handle;
+
+ ASSERT(status == 0);
- timer_handle = (uv_handle_t*)malloc(sizeof *timer_handle);
+ timer_handle = (uv_timer_t*)malloc(sizeof *timer_handle);
ASSERT(timer_handle != NULL);
/* Accept the client after 1 second */
- r = uv_timer_init(timer_handle, close_cb, (void*)handle);
+ r = uv_timer_init(timer_handle, close_cb, (void*)tcp);
ASSERT(r == 0);
r = uv_timer_start(timer_handle, do_accept, 1000, 0);
ASSERT(r == 0);
- accept_cb_called++;
+ connection_cb_called++;
}
static void start_server() {
struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
- uv_handle_t* server = (uv_handle_t*)malloc(sizeof *server);
+ uv_tcp_t* server = (uv_tcp_t*)malloc(sizeof *server);
int r;
ASSERT(server != NULL);
@@ -101,17 +111,17 @@ static void start_server() {
r = uv_tcp_init(server, close_cb, NULL);
ASSERT(r == 0);
- r = uv_bind(server, (struct sockaddr*) &addr);
+ r = uv_bind(server, addr);
ASSERT(r == 0);
- r = uv_listen(server, 128, accept_cb);
+ r = uv_listen(server, 128, connection_cb);
ASSERT(r == 0);
}
-static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
/* The server will not send anything, it should close gracefully. */
- ASSERT(handle != NULL);
+ ASSERT(tcp != NULL);
ASSERT(nread == -1);
ASSERT(uv_last_error().code == UV_EOF);
@@ -119,7 +129,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
free(buf.base);
}
- uv_close(handle);
+ uv_close((uv_handle_t*)tcp);
}
@@ -131,7 +141,7 @@ static void connect_cb(uv_req_t* req, int status) {
/* Not that the server will send anything, but otherwise we'll never know */
/* when te server closes the connection. */
- r = uv_read_start(req->handle, read_cb);
+ r = uv_read_start((uv_tcp_t*)(req->handle), alloc_cb, read_cb);
ASSERT(r == 0);
connect_cb_called++;
@@ -142,7 +152,7 @@ static void connect_cb(uv_req_t* req, int status) {
static void client_connect() {
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
- uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
+ uv_tcp_t* client = (uv_tcp_t*)malloc(sizeof *client);
uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r;
@@ -152,23 +162,15 @@ static void client_connect() {
r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0);
- uv_req_init(connect_req, client, connect_cb);
- r = uv_connect(connect_req, (struct sockaddr*)&addr);
+ uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);
+ r = uv_connect(connect_req, addr);
ASSERT(r == 0);
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t buf;
- buf.base = (char*)malloc(size);
- buf.len = size;
- return buf;
-}
-
-
TEST_IMPL(delayed_accept) {
- uv_init(alloc_cb);
+ uv_init();
start_server();
@@ -177,7 +179,7 @@ TEST_IMPL(delayed_accept) {
uv_run();
- ASSERT(accept_cb_called == 2);
+ ASSERT(connection_cb_called == 2);
ASSERT(do_accept_called == 2);
ASSERT(connect_cb_called == 2);
ASSERT(close_cb_called == 7);
diff --git a/deps/uv/test/test-get-currentexe.c b/deps/uv/test/test-get-currentexe.c
new file mode 100644
index 0000000000..880d9cd6d0
--- /dev/null
+++ b/deps/uv/test/test-get-currentexe.c
@@ -0,0 +1,53 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "../uv.h"
+#include "task.h"
+#include <string.h>
+
+#define PATHMAX 1024
+extern char executable_path[];
+
+TEST_IMPL(get_currentexe) {
+ char buffer[PATHMAX];
+ size_t size;
+ char* match;
+ int r;
+
+ size = sizeof(buffer) / sizeof(buffer[0]);
+ r = uv_get_exepath(buffer, &size);
+ ASSERT(!r);
+
+ match = strstr(buffer, executable_path);
+ /* Verify that the path returned from uv_get_exepath ends with executable_path */
+ ASSERT(match && !strcmp(match, executable_path));
+ ASSERT(size == strlen(buffer));
+
+ /* Negative tests */
+ size = sizeof(buffer) / sizeof(buffer[0]);
+ r = uv_get_exepath(NULL, &size);
+ ASSERT(r == -1);
+
+ r = uv_get_exepath(buffer, NULL);
+ ASSERT(r == -1);
+
+ return 0;
+}
diff --git a/deps/uv/test/test-list.h b/deps/uv/test/test-list.h
index 499d6503c7..c3cb252246 100644
--- a/deps/uv/test/test-list.h
+++ b/deps/uv/test/test-list.h
@@ -28,6 +28,8 @@ TEST_DECLARE (bind_error_addrnotavail_2)
TEST_DECLARE (bind_error_fault)
TEST_DECLARE (bind_error_inval)
TEST_DECLARE (connection_fail)
+TEST_DECLARE (connection_fail_doesnt_auto_close)
+TEST_DECLARE (shutdown_eof)
TEST_DECLARE (callback_stack)
TEST_DECLARE (timer)
TEST_DECLARE (timer_again)
@@ -38,6 +40,7 @@ TEST_DECLARE (async_ref)
TEST_DECLARE (prepare_ref)
TEST_DECLARE (check_ref)
TEST_DECLARE (async)
+TEST_DECLARE (get_currentexe)
TEST_DECLARE (fail_always)
TEST_DECLARE (pass_always)
HELPER_DECLARE (echo_server)
@@ -62,6 +65,10 @@ TASK_LIST_START
TEST_ENTRY (bind_error_inval)
TEST_ENTRY (connection_fail)
+ TEST_ENTRY (connection_fail_doesnt_auto_close)
+
+ TEST_ENTRY (shutdown_eof)
+ TEST_HELPER (shutdown_eof, echo_server)
TEST_ENTRY (callback_stack)
TEST_HELPER (callback_stack, echo_server)
@@ -80,6 +87,8 @@ TASK_LIST_START
TEST_ENTRY (async)
+ TEST_ENTRY (get_currentexe)
+
#if 0
/* These are for testing the test runner. */
TEST_ENTRY (fail_always)
diff --git a/deps/uv/test/test-loop-handles.c b/deps/uv/test/test-loop-handles.c
index 77055b43ef..89d10e5ed2 100644
--- a/deps/uv/test/test-loop-handles.c
+++ b/deps/uv/test/test-loop-handles.c
@@ -75,15 +75,15 @@
#define TIMEOUT 100
-static uv_handle_t prepare_1_handle;
-static uv_handle_t prepare_2_handle;
+static uv_prepare_t prepare_1_handle;
+static uv_prepare_t prepare_2_handle;
-static uv_handle_t check_handle;
+static uv_check_t check_handle;
-static uv_handle_t idle_1_handles[IDLE_COUNT];
-static uv_handle_t idle_2_handle;
+static uv_idle_t idle_1_handles[IDLE_COUNT];
+static uv_idle_t idle_2_handle;
-static uv_handle_t timer_handle;
+static uv_timer_t timer_handle;
static int loop_iteration = 0;
@@ -110,7 +110,7 @@ static int timer_cb_called = 0;
static void timer_cb(uv_handle_t* handle, int status) {
- ASSERT(handle == &timer_handle);
+ ASSERT(handle == (uv_handle_t*)&timer_handle);
ASSERT(status == 0);
timer_cb_called++;
@@ -127,7 +127,7 @@ static void idle_2_cb(uv_handle_t* handle, int status) {
LOG("IDLE_2_CB\n");
- ASSERT(handle == &idle_2_handle);
+ ASSERT(handle == (uv_handle_t*)&idle_2_handle);
ASSERT(status == 0);
idle_2_cb_called++;
@@ -140,7 +140,7 @@ static void idle_2_cb(uv_handle_t* handle, int status) {
static void idle_2_close_cb(uv_handle_t* handle, int status){
LOG("IDLE_2_CLOSE_CB\n");
- ASSERT(handle == &idle_2_handle);
+ ASSERT(handle == (uv_handle_t*)&idle_2_handle);
ASSERT(status == 0);
ASSERT(idle_2_is_active);
@@ -173,7 +173,7 @@ static void idle_1_cb(uv_handle_t* handle, int status) {
idle_1_cb_called++;
if (idle_1_cb_called % 5 == 0) {
- r = uv_idle_stop(handle);
+ r = uv_idle_stop((uv_idle_t*)handle);
ASSERT(r == 0);
idles_1_active--;
}
@@ -195,7 +195,7 @@ static void check_cb(uv_handle_t* handle, int status) {
LOG("CHECK_CB\n");
- ASSERT(handle == &check_handle);
+ ASSERT(handle == (uv_handle_t*)&check_handle);
ASSERT(status == 0);
/* XXX
@@ -213,22 +213,22 @@ static void check_cb(uv_handle_t* handle, int status) {
} else {
/* End of the test - close all handles */
- r = uv_close(&prepare_1_handle);
+ r = uv_close((uv_handle_t*)&prepare_1_handle);
ASSERT(r == 0);
- r = uv_close(&check_handle);
+ r = uv_close((uv_handle_t*)&check_handle);
ASSERT(r == 0);
- r = uv_close(&prepare_2_handle);
+ r = uv_close((uv_handle_t*)&prepare_2_handle);
ASSERT(r == 0);
for (i = 0; i < IDLE_COUNT; i++) {
- r = uv_close(&idle_1_handles[i]);
+ r = uv_close((uv_handle_t*)&idle_1_handles[i]);
ASSERT(r == 0);
}
/* This handle is closed/recreated every time, close it only if it is */
/* active.*/
if (idle_2_is_active) {
- r = uv_close(&idle_2_handle);
+ r = uv_close((uv_handle_t*)&idle_2_handle);
ASSERT(r == 0);
}
}
@@ -239,7 +239,7 @@ static void check_cb(uv_handle_t* handle, int status) {
static void check_close_cb(uv_handle_t* handle, int status){
LOG("CHECK_CLOSE_CB\n");
- ASSERT(handle == &check_handle);
+ ASSERT(handle == (uv_handle_t*)&check_handle);
ASSERT(status == 0);
check_close_cb_called++;
@@ -251,7 +251,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_2_CB\n");
- ASSERT(handle == &prepare_2_handle);
+ ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
ASSERT(status == 0);
/* XXX ASSERT(idles_1_active == 0); */
@@ -263,7 +263,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
/* (loop_iteration % 2 == 0) cannot be true. */
ASSERT(loop_iteration % 2 != 0);
- r = uv_prepare_stop(handle);
+ r = uv_prepare_stop((uv_prepare_t*)handle);
ASSERT(r == 0);
prepare_2_cb_called++;
@@ -272,7 +272,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
static void prepare_2_close_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_2_CLOSE_CB\n");
- ASSERT(handle == &prepare_2_handle);
+ ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
ASSERT(status == 0);
prepare_2_close_cb_called++;
@@ -284,7 +284,7 @@ static void prepare_1_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_1_CB\n");
- ASSERT(handle == &prepare_1_handle);
+ ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
ASSERT(status == 0);
/* XXX
@@ -306,25 +306,18 @@ static void prepare_1_cb(uv_handle_t* handle, int status) {
static void prepare_1_close_cb(uv_handle_t* handle, int status){
LOG("PREPARE_1_CLOSE_CB");
- ASSERT(handle == &prepare_1_handle);
+ ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
ASSERT(status == 0);
prepare_1_close_cb_called++;
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t rv = { 0, 0 };
- FATAL("alloc_cb should never be called in this test");
- return rv;
-}
-
-
TEST_IMPL(loop_handles) {
int i;
int r;
- uv_init(alloc_cb);
+ uv_init();
r = uv_prepare_init(&prepare_1_handle, prepare_1_close_cb, NULL);
ASSERT(r == 0);
@@ -387,50 +380,50 @@ TEST_IMPL(loop_handles) {
TEST_IMPL(ref) {
- uv_init(alloc_cb);
+ uv_init();
uv_run();
return 0;
}
TEST_IMPL(idle_ref) {
- uv_handle_t h;
- uv_init(alloc_cb);
+ uv_idle_t h;
+ uv_init();
uv_idle_init(&h, NULL, NULL);
uv_idle_start(&h, NULL);
- uv_unref(&h);
+ uv_unref();
uv_run();
return 0;
}
TEST_IMPL(async_ref) {
- uv_handle_t h;
- uv_init(alloc_cb);
+ uv_async_t h;
+ uv_init();
uv_async_init(&h, NULL, NULL, NULL);
- uv_unref(&h);
+ uv_unref();
uv_run();
return 0;
}
TEST_IMPL(prepare_ref) {
- uv_handle_t h;
- uv_init(alloc_cb);
+ uv_prepare_t h;
+ uv_init();
uv_prepare_init(&h, NULL, NULL);
uv_prepare_start(&h, NULL);
- uv_unref(&h);
+ uv_unref();
uv_run();
return 0;
}
TEST_IMPL(check_ref) {
- uv_handle_t h;
- uv_init(alloc_cb);
+ uv_check_t h;
+ uv_init();
uv_check_init(&h, NULL, NULL);
uv_check_start(&h, NULL);
- uv_unref(&h);
+ uv_unref();
uv_run();
return 0;
}
diff --git a/deps/uv/test/test-ping-pong.c b/deps/uv/test/test-ping-pong.c
index 13d114e32d..02ef56c12d 100644
--- a/deps/uv/test/test-ping-pong.c
+++ b/deps/uv/test/test-ping-pong.c
@@ -39,7 +39,7 @@ static char PING[] = "PING\n";
typedef struct {
int pongs;
int state;
- uv_handle_t handle;
+ uv_tcp_t tcp;
uv_req_t connect_req;
uv_req_t read_req;
char read_buffer[BUFSIZE];
@@ -48,6 +48,14 @@ typedef struct {
void pinger_try_read(pinger_t* pinger);
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
+ uv_buf_t buf;
+ buf.base = (char*)malloc(size);
+ buf.len = size;
+ return buf;
+}
+
+
static void pinger_on_close(uv_handle_t* handle, int status) {
pinger_t* pinger = (pinger_t*)handle->data;
@@ -75,7 +83,7 @@ static void pinger_write_ping(pinger_t* pinger) {
buf.len = strlen(PING);
req = (uv_req_t*)malloc(sizeof(*req));
- uv_req_init(req, &pinger->handle, pinger_after_write);
+ uv_req_init(req, (uv_handle_t*)(&pinger->tcp), pinger_after_write);
if (uv_write(req, &buf, 1)) {
FATAL("uv_write failed");
@@ -85,11 +93,11 @@ static void pinger_write_ping(pinger_t* pinger) {
}
-static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
unsigned int i;
pinger_t* pinger;
- pinger = (pinger_t*)handle->data;
+ pinger = (pinger_t*)tcp->data;
if (nread < 0) {
ASSERT(uv_last_error().code == UV_EOF);
@@ -100,7 +108,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
free(buf.base);
}
- uv_close(&pinger->handle);
+ uv_close((uv_handle_t*)(&pinger->tcp));
return;
}
@@ -115,7 +123,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
if (pinger->pongs < NUM_PINGS) {
pinger_write_ping(pinger);
} else {
- uv_close(&pinger->handle);
+ uv_close((uv_handle_t*)(&pinger->tcp));
return;
}
}
@@ -130,7 +138,7 @@ static void pinger_on_connect(uv_req_t *req, int status) {
pinger_write_ping(pinger);
- uv_read_start(req->handle, pinger_read_cb);
+ uv_read_start((uv_tcp_t*)(req->handle), alloc_cb, pinger_read_cb);
}
@@ -144,28 +152,21 @@ static void pinger_new() {
pinger->pongs = 0;
/* Try to connec to the server and do NUM_PINGS ping-pongs. */
- r = uv_tcp_init(&pinger->handle, pinger_on_close, (void*)pinger);
+ r = uv_tcp_init(&pinger->tcp, pinger_on_close, (void*)pinger);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */
- uv_req_init(&pinger->connect_req, &pinger->handle, pinger_on_connect);
+ uv_req_init(&pinger->connect_req, (uv_handle_t*)(&pinger->tcp),
+ pinger_on_connect);
- r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
+ r = uv_connect(&pinger->connect_req, server_addr);
ASSERT(!r);
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t buf;
- buf.base = (char*)malloc(size);
- buf.len = size;
- return buf;
-}
-
-
TEST_IMPL(ping_pong) {
- uv_init(alloc_cb);
+ uv_init();
pinger_new();
uv_run();
diff --git a/deps/uv/test/test-shutdown-eof.c b/deps/uv/test/test-shutdown-eof.c
new file mode 100644
index 0000000000..ebc94f4d0b
--- /dev/null
+++ b/deps/uv/test/test-shutdown-eof.c
@@ -0,0 +1,176 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "../uv.h"
+#include "task.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+static uv_timer_t timer;
+static uv_tcp_t tcp;
+static uv_req_t connect_req, write_req, shutdown_req;
+static uv_buf_t qbuf;
+static int got_q;
+static int got_eof;
+static int called_connect_cb;
+static int called_shutdown_cb;
+static int called_tcp_close_cb;
+static int called_timer_close_cb;
+static int called_timer_cb;
+
+
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
+ uv_buf_t buf;
+ buf.base = (char*)malloc(size);
+ buf.len = size;
+ return buf;
+}
+
+
+static void read_cb(uv_tcp_t* t, int nread, uv_buf_t buf) {
+ ASSERT(t == &tcp);
+
+ if (!got_q) {
+ ASSERT(nread == 1);
+ ASSERT(!got_eof);
+ ASSERT(buf.base[0] == 'Q');
+ free(buf.base);
+ got_q = 1;
+ puts("got Q");
+ } else {
+ ASSERT(uv_last_error().code == UV_EOF);
+ if (buf.base) {
+ free(buf.base);
+ }
+ got_eof = 1;
+ puts("got EOF");
+ }
+}
+
+
+static void shutdown_cb(uv_req_t *req, int status) {
+ ASSERT(req == &shutdown_req);
+
+ ASSERT(called_connect_cb == 1);
+ ASSERT(!got_eof);
+ ASSERT(called_tcp_close_cb == 0);
+ ASSERT(called_timer_close_cb == 0);
+ ASSERT(called_timer_cb == 0);
+
+ called_shutdown_cb++;
+}
+
+
+static void connect_cb(uv_req_t *req, int status) {
+ ASSERT(status == 0);
+ ASSERT(req == &connect_req);
+
+ /* Start reading from our connection so we can receive the EOF. */
+ uv_read_start(&tcp, alloc_cb, read_cb);
+
+ /*
+ * Write the letter 'Q' to gracefully kill the echo-server. This will not
+ * affect our connection.
+ */
+ uv_req_init(&write_req, (uv_handle_t*)&tcp, NULL);
+ uv_write(&write_req, &qbuf, 1);
+
+ /* Shutdown our end of the connection. */
+ uv_req_init(&shutdown_req, (uv_handle_t*)&tcp, shutdown_cb);
+ uv_shutdown(&shutdown_req);
+
+ called_connect_cb++;
+ ASSERT(called_shutdown_cb == 0);
+}
+
+
+void tcp_close_cb(uv_handle_t* handle, int status) {
+ ASSERT(handle == (uv_handle_t*) &tcp);
+
+ ASSERT(called_connect_cb == 1);
+ ASSERT(got_q);
+ ASSERT(got_eof);
+ ASSERT(called_timer_cb == 1);
+
+ called_tcp_close_cb++;
+}
+
+
+void timer_close_cb(uv_handle_t* handle, int status) {
+ ASSERT(handle == (uv_handle_t*) &timer);
+ called_timer_close_cb++;
+}
+
+
+void timer_cb(uv_handle_t* handle, int status) {
+ ASSERT(handle == (uv_handle_t*) &timer);
+ uv_close(handle);
+
+ /*
+ * The most important assert of the test: we have not received
+ * tcp_close_cb yet.
+ */
+ ASSERT(called_tcp_close_cb == 0);
+ uv_close((uv_handle_t*) &tcp);
+
+ called_timer_cb++;
+}
+
+
+/*
+ * This test has a client which connects to the echo_server and immediately
+ * issues a shutdown. The echo-server, in response, will also shutdown their
+ * connection. We check, with a timer, that libuv is not automatically
+ * calling uv_close when the client receives the EOF from echo-server.
+ */
+TEST_IMPL(shutdown_eof) {
+ struct sockaddr_in server_addr;
+ int r;
+
+ uv_init();
+
+ qbuf.base = "Q";
+ qbuf.len = 1;
+
+ uv_timer_init(&timer, timer_close_cb, NULL);
+ uv_timer_start(&timer, timer_cb, 100, 0);
+
+ server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
+ r = uv_tcp_init(&tcp, tcp_close_cb, NULL);
+ ASSERT(!r);
+
+ uv_req_init(&connect_req, (uv_handle_t*) &tcp, connect_cb);
+ r = uv_connect(&connect_req, server_addr);
+ ASSERT(!r);
+
+ uv_run();
+
+ ASSERT(called_connect_cb == 1);
+ ASSERT(called_shutdown_cb == 1);
+ ASSERT(got_eof);
+ ASSERT(got_q);
+ ASSERT(called_tcp_close_cb == 1);
+ ASSERT(called_timer_close_cb == 1);
+ ASSERT(called_timer_cb == 1);
+
+ return 0;
+}
+
diff --git a/deps/uv/test/test-tcp-writealot.c b/deps/uv/test/test-tcp-writealot.c
index 15cc2d5b6f..f31057d757 100644
--- a/deps/uv/test/test-tcp-writealot.c
+++ b/deps/uv/test/test-tcp-writealot.c
@@ -45,6 +45,14 @@ static int bytes_received = 0;
static int bytes_received_done = 0;
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
+ uv_buf_t buf;
+ buf.base = (char*)malloc(size);
+ buf.len = size;
+ return buf;
+}
+
+
static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL);
ASSERT(status == 0);
@@ -56,11 +64,15 @@ static void close_cb(uv_handle_t* handle, int status) {
static void shutdown_cb(uv_req_t* req, int status) {
+ uv_tcp_t* tcp;
+
ASSERT(req);
ASSERT(status == 0);
+ tcp = (uv_tcp_t*)(req->handle);
+
/* The write buffer should be empty by now. */
- ASSERT(req->handle->write_queue_size == 0);
+ ASSERT(tcp->write_queue_size == 0);
/* Now we wait for the EOF */
shutdown_cb_called++;
@@ -72,8 +84,8 @@ static void shutdown_cb(uv_req_t* req, int status) {
}
-static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
- ASSERT(handle != NULL);
+static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
+ ASSERT(tcp != NULL);
if (nread < 0) {
ASSERT(uv_last_error().code == UV_EOF);
@@ -83,7 +95,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
free(buf.base);
}
- uv_close(handle);
+ uv_close((uv_handle_t*)tcp);
return;
}
@@ -111,13 +123,13 @@ static void write_cb(uv_req_t* req, int status) {
static void connect_cb(uv_req_t* req, int status) {
uv_buf_t send_bufs[CHUNKS_PER_WRITE];
- uv_handle_t* handle;
+ uv_tcp_t* tcp;
int i, j, r;
ASSERT(req != NULL);
ASSERT(status == 0);
- handle = req->handle;
+ tcp = (uv_tcp_t*)req->handle;
connect_cb_called++;
free(req);
@@ -133,7 +145,7 @@ static void connect_cb(uv_req_t* req, int status) {
req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL);
- uv_req_init(req, handle, write_cb);
+ uv_req_init(req, (uv_handle_t*)tcp, write_cb);
r = uv_write(req, (uv_buf_t*)&send_bufs, CHUNKS_PER_WRITE);
ASSERT(r == 0);
}
@@ -141,7 +153,7 @@ static void connect_cb(uv_req_t* req, int status) {
/* Shutdown on drain. FIXME: dealloc req? */
req = (uv_req_t*) malloc(sizeof(uv_req_t));
ASSERT(req != NULL);
- uv_req_init(req, handle, shutdown_cb);
+ uv_req_init(req, (uv_handle_t*)tcp, shutdown_cb);
r = uv_shutdown(req);
ASSERT(r == 0);
@@ -149,23 +161,15 @@ static void connect_cb(uv_req_t* req, int status) {
req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL);
- uv_req_init(req, handle, read_cb);
- r = uv_read_start(handle, read_cb);
+ uv_req_init(req, (uv_handle_t*)tcp, read_cb);
+ r = uv_read_start(tcp, alloc_cb, read_cb);
ASSERT(r == 0);
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t buf;
- buf.base = (char*)malloc(size);
- buf.len = size;
- return buf;
-}
-
-
TEST_IMPL(tcp_writealot) {
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
- uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
+ uv_tcp_t* client = (uv_tcp_t*)malloc(sizeof *client);
uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r;
@@ -176,13 +180,13 @@ TEST_IMPL(tcp_writealot) {
ASSERT(send_buffer != NULL);
- uv_init(alloc_cb);
+ uv_init();
r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0);
- uv_req_init(connect_req, client, connect_cb);
- r = uv_connect(connect_req, (struct sockaddr*)&addr);
+ uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);
+ r = uv_connect(connect_req, addr);
ASSERT(r == 0);
uv_run();
diff --git a/deps/uv/test/test-timer-again.c b/deps/uv/test/test-timer-again.c
index 1585c09d6c..0844bc3a45 100644
--- a/deps/uv/test/test-timer-again.c
+++ b/deps/uv/test/test-timer-again.c
@@ -29,7 +29,7 @@ static int repeat_2_cb_called = 0;
static int repeat_2_cb_allowed = 0;
-static uv_handle_t dummy, repeat_1, repeat_2;
+static uv_timer_t dummy, repeat_1, repeat_2;
static int64_t start_time;
@@ -45,10 +45,10 @@ static void close_cb(uv_handle_t* handle, int status) {
static void repeat_1_cb(uv_handle_t* handle, int status) {
int r;
- ASSERT(handle == &repeat_1);
+ ASSERT(handle == (uv_handle_t*)&repeat_1);
ASSERT(status == 0);
- ASSERT(uv_timer_get_repeat(handle) == 50);
+ ASSERT(uv_timer_get_repeat((uv_timer_t*)handle) == 50);
LOGF("repeat_1_cb called after %ld ms\n", (long int)(uv_now() - start_time));
@@ -68,7 +68,7 @@ static void repeat_1_cb(uv_handle_t* handle, int status) {
static void repeat_2_cb(uv_handle_t* handle, int status) {
- ASSERT(handle == &repeat_2);
+ ASSERT(handle == (uv_handle_t*) &repeat_2);
ASSERT(status == 0);
ASSERT(repeat_2_cb_allowed);
@@ -76,31 +76,25 @@ static void repeat_2_cb(uv_handle_t* handle, int status) {
repeat_2_cb_called++;
- if (uv_timer_get_repeat(handle) == 0) {
+ if (uv_timer_get_repeat(&repeat_2) == 0) {
ASSERT(!uv_is_active(handle));
uv_close(handle);
return;
}
- LOGF("uv_timer_get_repeat %ld ms\n", (long int)uv_timer_get_repeat(handle));
- ASSERT(uv_timer_get_repeat(handle) == 100);
+ LOGF("uv_timer_get_repeat %ld ms\n",
+ (long int)uv_timer_get_repeat(&repeat_2));
+ ASSERT(uv_timer_get_repeat(&repeat_2) == 100);
/* This shouldn't take effect immediately. */
uv_timer_set_repeat(&repeat_2, 0);
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t buf = {0, 0};
- FATAL("alloc should not be called");
- return buf;
-}
-
-
TEST_IMPL(timer_again) {
int r;
- uv_init(alloc_cb);
+ uv_init();
start_time = uv_now();
ASSERT(0 < start_time);
diff --git a/deps/uv/test/test-timer.c b/deps/uv/test/test-timer.c
index 15c80def2b..049835ed7b 100644
--- a/deps/uv/test/test-timer.c
+++ b/deps/uv/test/test-timer.c
@@ -92,26 +92,19 @@ static void never_cb(uv_handle_t* handle, int status) {
}
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
- uv_buf_t buf = {0, 0};
- FATAL("alloc should not be called");
- return buf;
-}
-
-
TEST_IMPL(timer) {
- uv_handle_t *once;
- uv_handle_t repeat, never;
+ uv_timer_t *once;
+ uv_timer_t repeat, never;
int i, r;
- uv_init(alloc_cb);
+ uv_init();
start_time = uv_now();
ASSERT(0 < start_time);
/* Let 10 timers time out in 500 ms total. */
for (i = 0; i < 10; i++) {
- once = (uv_handle_t*)malloc(sizeof(*once));
+ once = (uv_timer_t*)malloc(sizeof(*once));
ASSERT(once != NULL);
r = uv_timer_init(once, once_close_cb, NULL);
ASSERT(r == 0);
diff --git a/deps/uv/uv-unix.c b/deps/uv/uv-unix.c
index c92e6492d3..0a8cb24ebf 100644
--- a/deps/uv/uv-unix.c
+++ b/deps/uv/uv-unix.c
@@ -31,16 +31,23 @@
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
+#include <limits.h> /* PATH_MAX */
+#if defined(__APPLE__)
+#include <mach-o/dyld.h> /* _NSGetExecutablePath */
+#endif
+
+#if defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#endif
static uv_err_t last_err;
-static uv_alloc_cb alloc_cb;
void uv__tcp_io(EV_P_ ev_io* watcher, int revents);
void uv__next(EV_P_ ev_idle* watcher, int revents);
-static void uv__tcp_connect(uv_handle_t* handle);
-int uv_tcp_open(uv_handle_t*, int fd);
+static void uv__tcp_connect(uv_tcp_t*);
+int uv_tcp_open(uv_tcp_t*, int fd);
static void uv__finish_close(uv_handle_t* handle);
@@ -50,7 +57,7 @@ enum {
UV_CLOSED = 0x00000002, /* close(2) finished. */
UV_READING = 0x00000004, /* uv_read_start() called. */
UV_SHUTTING = 0x00000008, /* uv_shutdown() called but not complete. */
- UV_SHUT = 0x00000010, /* Write side closed. */
+ UV_SHUT = 0x00000010 /* Write side closed. */
};
@@ -126,34 +133,41 @@ struct sockaddr_in uv_ip4_addr(char* ip, int port) {
int uv_close(uv_handle_t* handle) {
+ uv_tcp_t* tcp;
+ uv_async_t* async;
+ uv_timer_t* timer;
+
switch (handle->type) {
case UV_TCP:
- ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
- ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
+ tcp = (uv_tcp_t*) handle;
+ ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
+ ev_io_stop(EV_DEFAULT_ &tcp->read_watcher);
break;
case UV_PREPARE:
- uv_prepare_stop(handle);
+ uv_prepare_stop((uv_prepare_t*) handle);
break;
case UV_CHECK:
- uv_check_stop(handle);
+ uv_check_stop((uv_check_t*) handle);
break;
case UV_IDLE:
- uv_idle_stop(handle);
+ uv_idle_stop((uv_idle_t*) handle);
break;
case UV_ASYNC:
- ev_async_stop(EV_DEFAULT_ &handle->async_watcher);
+ async = (uv_async_t*)handle;
+ ev_async_stop(EV_DEFAULT_ &async->async_watcher);
ev_ref(EV_DEFAULT_UC);
break;
case UV_TIMER:
- if (ev_is_active(&handle->timer_watcher)) {
+ timer = (uv_timer_t*)handle;
+ if (ev_is_active(&timer->timer_watcher)) {
ev_ref(EV_DEFAULT_UC);
}
- ev_timer_stop(EV_DEFAULT_ &handle->timer_watcher);
+ ev_timer_stop(EV_DEFAULT_ &timer->timer_watcher);
break;
default:
@@ -172,11 +186,8 @@ int uv_close(uv_handle_t* handle) {
}
-void uv_init(uv_alloc_cb cb) {
- assert(cb);
- alloc_cb = cb;
-
- // Initialize the default ev loop.
+void uv_init() {
+ /* Initialize the default ev loop. */
#if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060
ev_default_loop(EVBACKEND_KQUEUE);
#else
@@ -206,72 +217,67 @@ static void uv__handle_init(uv_handle_t* handle, uv_handle_type type,
}
-int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb,
- void* data) {
- uv__handle_init(handle, UV_TCP, close_cb, data);
+int uv_tcp_init(uv_tcp_t* tcp, uv_close_cb close_cb, void* data) {
+ uv__handle_init((uv_handle_t*)tcp, UV_TCP, close_cb, data);
- handle->connect_req = NULL;
- handle->accepted_fd = -1;
- handle->fd = -1;
- handle->delayed_error = 0;
- ngx_queue_init(&handle->write_queue);
- handle->write_queue_size = 0;
+ tcp->alloc_cb = NULL;
+ tcp->connect_req = NULL;
+ tcp->accepted_fd = -1;
+ tcp->fd = -1;
+ tcp->delayed_error = 0;
+ ngx_queue_init(&tcp->write_queue);
+ tcp->write_queue_size = 0;
- ev_init(&handle->read_watcher, uv__tcp_io);
- handle->read_watcher.data = handle;
+ ev_init(&tcp->read_watcher, uv__tcp_io);
+ tcp->read_watcher.data = tcp;
- ev_init(&handle->write_watcher, uv__tcp_io);
- handle->write_watcher.data = handle;
+ ev_init(&tcp->write_watcher, uv__tcp_io);
+ tcp->write_watcher.data = tcp;
- assert(ngx_queue_empty(&handle->write_queue));
- assert(handle->write_queue_size == 0);
+ assert(ngx_queue_empty(&tcp->write_queue));
+ assert(tcp->write_queue_size == 0);
return 0;
}
-int uv_bind(uv_handle_t* handle, struct sockaddr* addr) {
- int addrsize;
- int domain;
+int uv_bind(uv_tcp_t* tcp, struct sockaddr_in addr) {
+ int addrsize = sizeof(struct sockaddr_in);
+ int domain = AF_INET;
int r;
- if (handle->fd <= 0) {
+ if (tcp->fd <= 0) {
int fd = socket(AF_INET, SOCK_STREAM, 0);
+
if (fd < 0) {
- uv_err_new(handle, errno);
+ uv_err_new((uv_handle_t*)tcp, errno);
return -1;
}
- if (uv_tcp_open(handle, fd)) {
+ if (uv_tcp_open(tcp, fd)) {
close(fd);
return -2;
}
}
- assert(handle->fd >= 0);
+ assert(tcp->fd >= 0);
- if (addr->sa_family == AF_INET) {
- addrsize = sizeof(struct sockaddr_in);
- domain = AF_INET;
- } else if (addr->sa_family == AF_INET6) {
- addrsize = sizeof(struct sockaddr_in6);
- domain = AF_INET6;
- } else {
- uv_err_new(handle, EFAULT);
+ if (addr.sin_family != AF_INET) {
+ uv_err_new((uv_handle_t*)tcp, EFAULT);
return -1;
}
- r = bind(handle->fd, addr, addrsize);
- handle->delayed_error = 0;
+ r = bind(tcp->fd, (struct sockaddr*) &addr, addrsize);
+ tcp->delayed_error = 0;
if (r) {
switch (errno) {
case EADDRINUSE:
- handle->delayed_error = errno;
+ tcp->delayed_error = errno;
return 0;
default:
- uv_err_new(handle, errno);
+ uv_err_new((uv_handle_t*)tcp, errno);
return -1;
}
}
@@ -280,13 +286,16 @@ int uv_bind(uv_handle_t* handle, struct sockaddr* addr) {
}
-int uv_tcp_open(uv_handle_t* handle, int fd) {
+int uv_tcp_open(uv_tcp_t* tcp, int fd) {
+ int yes;
+ int r;
+
assert(fd >= 0);
- handle->fd = fd;
+ tcp->fd = fd;
/* Set non-blocking. */
- int yes = 1;
- int r = fcntl(fd, F_SETFL, O_NONBLOCK);
+ yes = 1;
+ r = fcntl(fd, F_SETFL, O_NONBLOCK);
assert(r == 0);
/* Reuse the port address. */
@@ -294,39 +303,40 @@ int uv_tcp_open(uv_handle_t* handle, int fd) {
assert(r == 0);
/* Associate the fd with each ev_io watcher. */
- ev_io_set(&handle->read_watcher, fd, EV_READ);
- ev_io_set(&handle->write_watcher, fd, EV_WRITE);
+ ev_io_set(&tcp->read_watcher, fd, EV_READ);
+ ev_io_set(&tcp->write_watcher, fd, EV_WRITE);
/* These should have been set up by uv_tcp_init. */
- assert(handle->next_watcher.data == handle);
- assert(handle->write_watcher.data == handle);
- assert(handle->read_watcher.data == handle);
- assert(handle->read_watcher.cb == uv__tcp_io);
- assert(handle->write_watcher.cb == uv__tcp_io);
+ assert(tcp->next_watcher.data == tcp);
+ assert(tcp->write_watcher.data == tcp);
+ assert(tcp->read_watcher.data == tcp);
+ assert(tcp->read_watcher.cb == uv__tcp_io);
+ assert(tcp->write_watcher.cb == uv__tcp_io);
return 0;
}
void uv__server_io(EV_P_ ev_io* watcher, int revents) {
- uv_handle_t* handle = watcher->data;
- assert(watcher == &handle->read_watcher ||
- watcher == &handle->write_watcher);
+ int fd;
+ struct sockaddr addr;
+ socklen_t addrlen;
+ uv_tcp_t* tcp = watcher->data;
+
+ assert(watcher == &tcp->read_watcher ||
+ watcher == &tcp->write_watcher);
assert(revents == EV_READ);
- assert(!uv_flag_is_set(handle, UV_CLOSING));
+ assert(!uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING));
- if (handle->accepted_fd >= 0) {
- ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
+ if (tcp->accepted_fd >= 0) {
+ ev_io_stop(EV_DEFAULT_ &tcp->read_watcher);
return;
}
while (1) {
- struct sockaddr addr = { 0 };
- socklen_t addrlen = 0;
-
- assert(handle->accepted_fd < 0);
- int fd = accept(handle->fd, &addr, &addrlen);
+ assert(tcp->accepted_fd < 0);
+ fd = accept(tcp->fd, &addr, &addrlen);
if (fd < 0) {
if (errno == EAGAIN) {
@@ -336,16 +346,16 @@ void uv__server_io(EV_P_ ev_io* watcher, int revents) {
/* TODO special trick. unlock reserved socket, accept, close. */
return;
} else {
- uv_err_new(handle, errno);
- uv_close(handle);
+ uv_err_new((uv_handle_t*)tcp, errno);
+ tcp->connection_cb(tcp, -1);
}
} else {
- handle->accepted_fd = fd;
- handle->accept_cb(handle);
- if (handle->accepted_fd >= 0) {
+ tcp->accepted_fd = fd;
+ tcp->connection_cb(tcp, 0);
+ if (tcp->accepted_fd >= 0) {
/* The user hasn't yet accepted called uv_accept() */
- ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
+ ev_io_stop(EV_DEFAULT_ &tcp->read_watcher);
return;
}
}
@@ -353,7 +363,7 @@ void uv__server_io(EV_P_ ev_io* watcher, int revents) {
}
-int uv_accept(uv_handle_t* server, uv_handle_t* client,
+int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
uv_close_cb close_cb, void* data) {
if (server->accepted_fd < 0) {
return -1;
@@ -376,32 +386,36 @@ int uv_accept(uv_handle_t* server, uv_handle_t* client,
}
-int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) {
- assert(handle->fd >= 0);
+int uv_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
+ int r;
+
+ assert(tcp->fd >= 0);
- if (handle->delayed_error) {
- uv_err_new(handle, handle->delayed_error);
+ if (tcp->delayed_error) {
+ uv_err_new((uv_handle_t*)tcp, tcp->delayed_error);
return -1;
}
- int r = listen(handle->fd, backlog);
+ r = listen(tcp->fd, backlog);
if (r < 0) {
- uv_err_new(handle, errno);
+ uv_err_new((uv_handle_t*)tcp, errno);
return -1;
}
- handle->accept_cb = cb;
+ tcp->connection_cb = cb;
/* Start listening for connections. */
- ev_io_set(&handle->read_watcher, handle->fd, EV_READ);
- ev_set_cb(&handle->read_watcher, uv__server_io);
- ev_io_start(EV_DEFAULT_ &handle->read_watcher);
+ ev_io_set(&tcp->read_watcher, tcp->fd, EV_READ);
+ ev_set_cb(&tcp->read_watcher, uv__server_io);
+ ev_io_start(EV_DEFAULT_ &tcp->read_watcher);
return 0;
}
void uv__finish_close(uv_handle_t* handle) {
+ uv_tcp_t* tcp;
+
assert(uv_flag_is_set(handle, UV_CLOSING));
assert(!uv_flag_is_set(handle, UV_CLOSED));
uv_flag_set(handle, UV_CLOSED);
@@ -411,39 +425,40 @@ void uv__finish_close(uv_handle_t* handle) {
/* XXX Is it necessary to stop these watchers here? weren't they
* supposed to be stopped in uv_close()?
*/
- ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
- ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
+ tcp = (uv_tcp_t*)handle;
+ ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
+ ev_io_stop(EV_DEFAULT_ &tcp->read_watcher);
- assert(!ev_is_active(&handle->read_watcher));
- assert(!ev_is_active(&handle->write_watcher));
+ assert(!ev_is_active(&tcp->read_watcher));
+ assert(!ev_is_active(&tcp->write_watcher));
- close(handle->fd);
- handle->fd = -1;
+ close(tcp->fd);
+ tcp->fd = -1;
- if (handle->accepted_fd >= 0) {
- close(handle->accepted_fd);
- handle->accepted_fd = -1;
+ if (tcp->accepted_fd >= 0) {
+ close(tcp->accepted_fd);
+ tcp->accepted_fd = -1;
}
break;
case UV_PREPARE:
- assert(!ev_is_active(&handle->prepare_watcher));
+ assert(!ev_is_active(&((uv_prepare_t*)handle)->prepare_watcher));
break;
case UV_CHECK:
- assert(!ev_is_active(&handle->check_watcher));
+ assert(!ev_is_active(&((uv_check_t*)handle)->check_watcher));
break;
case UV_IDLE:
- assert(!ev_is_active(&handle->idle_watcher));
+ assert(!ev_is_active(&((uv_idle_t*)handle)->idle_watcher));
break;
case UV_ASYNC:
- assert(!ev_is_active(&handle->async_watcher));
+ assert(!ev_is_active(&((uv_async_t*)handle)->async_watcher));
break;
case UV_TIMER:
- assert(!ev_is_active(&handle->timer_watcher));
+ assert(!ev_is_active(&((uv_timer_t*)handle)->timer_watcher));
break;
default:
@@ -461,17 +476,20 @@ void uv__finish_close(uv_handle_t* handle) {
}
-uv_req_t* uv_write_queue_head(uv_handle_t* handle) {
- if (ngx_queue_empty(&handle->write_queue)) {
+uv_req_t* uv_write_queue_head(uv_tcp_t* tcp) {
+ ngx_queue_t* q;
+ uv_req_t* req;
+
+ if (ngx_queue_empty(&tcp->write_queue)) {
return NULL;
}
- ngx_queue_t* q = ngx_queue_head(&handle->write_queue);
+ q = ngx_queue_head(&tcp->write_queue);
if (!q) {
return NULL;
}
- uv_req_t* req = ngx_queue_data(q, struct uv_req_s, queue);
+ req = ngx_queue_data(q, struct uv_req_s, queue);
assert(req);
return req;
@@ -491,71 +509,76 @@ void uv__next(EV_P_ ev_idle* watcher, int revents) {
}
-static void uv__drain(uv_handle_t* handle) {
- assert(!uv_write_queue_head(handle));
- assert(handle->write_queue_size == 0);
+static void uv__drain(uv_tcp_t* tcp) {
+ uv_req_t* req;
+ uv_shutdown_cb cb;
- ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
+ assert(!uv_write_queue_head(tcp));
+ assert(tcp->write_queue_size == 0);
+
+ ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
/* Shutdown? */
- if (uv_flag_is_set(handle, UV_SHUTTING) &&
- !uv_flag_is_set(handle, UV_CLOSING) &&
- !uv_flag_is_set(handle, UV_SHUT)) {
- assert(handle->shutdown_req);
-
- uv_req_t* req = handle->shutdown_req;
- uv_shutdown_cb cb = req->cb;
-
- if (shutdown(handle->fd, SHUT_WR)) {
- /* Error. Nothing we can do, close the handle. */
- uv_err_new(handle, errno);
- uv_close(handle);
+ if (uv_flag_is_set((uv_handle_t*)tcp, UV_SHUTTING) &&
+ !uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING) &&
+ !uv_flag_is_set((uv_handle_t*)tcp, UV_SHUT)) {
+ assert(tcp->shutdown_req);
+
+ req = tcp->shutdown_req;
+ cb = (uv_shutdown_cb)req->cb;
+
+ if (shutdown(tcp->fd, SHUT_WR)) {
+ /* Error. Report it. User should call uv_close(). */
+ uv_err_new((uv_handle_t*)tcp, errno);
if (cb) cb(req, -1);
} else {
- uv_err_new(handle, 0);
- uv_flag_set(handle, UV_SHUT);
+ uv_err_new((uv_handle_t*)tcp, 0);
+ uv_flag_set((uv_handle_t*)tcp, UV_SHUT);
if (cb) cb(req, 0);
}
}
}
-void uv__write(uv_handle_t* handle) {
- assert(handle->fd >= 0);
+void uv__write(uv_tcp_t* tcp) {
+ uv_req_t* req;
+ struct iovec* iov;
+ int iovcnt;
+ ssize_t n;
+ uv_write_cb cb;
+
+ assert(tcp->fd >= 0);
/* TODO: should probably while(1) here until EAGAIN */
/* Get the request at the head of the queue. */
- uv_req_t* req = uv_write_queue_head(handle);
+ req = uv_write_queue_head(tcp);
if (!req) {
- assert(handle->write_queue_size == 0);
- uv__drain(handle);
+ assert(tcp->write_queue_size == 0);
+ uv__drain(tcp);
return;
}
- assert(req->handle == handle);
+ assert(req->handle == (uv_handle_t*)tcp);
/* Cast to iovec. We had to have our own uv_buf_t instead of iovec
* because Windows's WSABUF is not an iovec.
*/
assert(sizeof(uv_buf_t) == sizeof(struct iovec));
- struct iovec* iov = (struct iovec*) &(req->bufs[req->write_index]);
- int iovcnt = req->bufcnt - req->write_index;
+ iov = (struct iovec*) &(req->bufs[req->write_index]);
+ iovcnt = req->bufcnt - req->write_index;
/* Now do the actual writev. Note that we've been updating the pointers
* inside the iov each time we write. So there is no need to offset it.
*/
- ssize_t n = writev(handle->fd, iov, iovcnt);
+ n = writev(tcp->fd, iov, iovcnt);
- uv_write_cb cb = req->cb;
+ cb = (uv_write_cb)req->cb;
if (n < 0) {
if (errno != EAGAIN) {
- uv_err_t err = uv_err_new(handle, errno);
-
- /* XXX How do we handle the error? Need test coverage here. */
- uv_close(handle);
+ uv_err_t err = uv_err_new((uv_handle_t*)tcp, errno);
if (cb) {
cb(req, -1);
@@ -575,7 +598,7 @@ void uv__write(uv_handle_t* handle) {
if (n < len) {
buf->base += n;
buf->len -= n;
- handle->write_queue_size -= n;
+ tcp->write_queue_size -= n;
n = 0;
/* There is more to write. Break and ensure the watcher is pending. */
@@ -588,14 +611,14 @@ void uv__write(uv_handle_t* handle) {
assert(n >= len);
n -= len;
- assert(handle->write_queue_size >= len);
- handle->write_queue_size -= len;
+ assert(tcp->write_queue_size >= len);
+ tcp->write_queue_size -= len;
if (req->write_index == req->bufcnt) {
/* Then we're done! */
assert(n == 0);
- /* Pop the req off handle->write_queue. */
+ /* Pop the req off tcp->write_queue. */
ngx_queue_remove(&req->queue);
free(req->bufs); /* FIXME: we should not be allocing for each read */
req->bufs = NULL;
@@ -605,11 +628,11 @@ void uv__write(uv_handle_t* handle) {
cb(req, 0);
}
- if (!ngx_queue_empty(&handle->write_queue)) {
- assert(handle->write_queue_size > 0);
+ if (!ngx_queue_empty(&tcp->write_queue)) {
+ assert(tcp->write_queue_size > 0);
} else {
/* Write queue drained. */
- uv__drain(handle);
+ uv__drain(tcp);
}
return;
@@ -622,100 +645,101 @@ void uv__write(uv_handle_t* handle) {
assert(n == 0 || n == -1);
/* We're not done yet. */
- assert(ev_is_active(&handle->write_watcher));
- ev_io_start(EV_DEFAULT_ &handle->write_watcher);
+ assert(ev_is_active(&tcp->write_watcher));
+ ev_io_start(EV_DEFAULT_ &tcp->write_watcher);
}
-void uv__read(uv_handle_t* handle) {
+void uv__read(uv_tcp_t* tcp) {
+ uv_buf_t buf;
+ struct iovec* iov;
+ ssize_t nread;
+
/* XXX: Maybe instead of having UV_READING we just test if
- * handle->read_cb is NULL or not?
+ * tcp->read_cb is NULL or not?
*/
- while (handle->read_cb && uv_flag_is_set(handle, UV_READING)) {
- assert(alloc_cb);
- uv_buf_t buf = alloc_cb(handle, 64 * 1024);
+ while (tcp->read_cb && uv_flag_is_set((uv_handle_t*)tcp, UV_READING)) {
+ assert(tcp->alloc_cb);
+ buf = tcp->alloc_cb(tcp, 64 * 1024);
assert(buf.len > 0);
assert(buf.base);
- struct iovec* iov = (struct iovec*) &buf;
+ iov = (struct iovec*) &buf;
- ssize_t nread = readv(handle->fd, iov, 1);
+ nread = readv(tcp->fd, iov, 1);
if (nread < 0) {
/* Error */
if (errno == EAGAIN) {
/* Wait for the next one. */
- if (uv_flag_is_set(handle, UV_READING)) {
- ev_io_start(EV_DEFAULT_UC_ &handle->read_watcher);
+ if (uv_flag_is_set((uv_handle_t*)tcp, UV_READING)) {
+ ev_io_start(EV_DEFAULT_UC_ &tcp->read_watcher);
}
- uv_err_new(handle, EAGAIN);
- handle->read_cb(handle, 0, buf);
+ uv_err_new((uv_handle_t*)tcp, EAGAIN);
+ tcp->read_cb(tcp, 0, buf);
return;
} else {
- uv_err_new(handle, errno);
- uv_close(handle);
- handle->read_cb(handle, -1, buf);
- assert(!ev_is_active(&handle->read_watcher));
+ /* Error. User should call uv_close(). */
+ uv_err_new((uv_handle_t*)tcp, errno);
+ tcp->read_cb(tcp, -1, buf);
+ assert(!ev_is_active(&tcp->read_watcher));
return;
}
} else if (nread == 0) {
/* EOF */
- uv_err_new_artificial(handle, UV_EOF);
- ev_io_stop(EV_DEFAULT_UC_ &handle->read_watcher);
- handle->read_cb(handle, -1, buf);
-
- if (uv_flag_is_set(handle, UV_SHUT)) {
- uv_close(handle);
- }
+ uv_err_new_artificial((uv_handle_t*)tcp, UV_EOF);
+ ev_io_stop(EV_DEFAULT_UC_ &tcp->read_watcher);
+ tcp->read_cb(tcp, -1, buf);
return;
} else {
/* Successful read */
- handle->read_cb(handle, nread, buf);
+ tcp->read_cb(tcp, nread, buf);
}
}
}
int uv_shutdown(uv_req_t* req) {
- uv_handle_t* handle = req->handle;
- assert(handle->fd >= 0);
+ uv_tcp_t* tcp = (uv_tcp_t*)req->handle;
+ assert(tcp->fd >= 0);
+ assert(tcp->type == UV_TCP);
- if (uv_flag_is_set(handle, UV_SHUT) ||
- uv_flag_is_set(handle, UV_CLOSED) ||
- uv_flag_is_set(handle, UV_CLOSING)) {
+ if (uv_flag_is_set((uv_handle_t*)tcp, UV_SHUT) ||
+ uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSED) ||
+ uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING)) {
return -1;
}
- handle->shutdown_req = req;
+ tcp->shutdown_req = req;
req->type = UV_SHUTDOWN;
- uv_flag_set(handle, UV_SHUTTING);
+ uv_flag_set((uv_handle_t*)tcp, UV_SHUTTING);
- ev_io_start(EV_DEFAULT_UC_ &handle->write_watcher);
+ ev_io_start(EV_DEFAULT_UC_ &tcp->write_watcher);
return 0;
}
void uv__tcp_io(EV_P_ ev_io* watcher, int revents) {
- uv_handle_t* handle = watcher->data;
- assert(watcher == &handle->read_watcher ||
- watcher == &handle->write_watcher);
+ uv_tcp_t* tcp = watcher->data;
+ assert(watcher == &tcp->read_watcher ||
+ watcher == &tcp->write_watcher);
- assert(handle->fd >= 0);
+ assert(tcp->fd >= 0);
- assert(!uv_flag_is_set(handle, UV_CLOSING));
+ assert(!uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING));
- if (handle->connect_req) {
- uv__tcp_connect(handle);
+ if (tcp->connect_req) {
+ uv__tcp_connect(tcp);
} else {
if (revents & EV_READ) {
- uv__read(handle);
+ uv__read(tcp);
}
if (revents & EV_WRITE) {
- uv__write(handle);
+ uv__write(tcp);
}
}
}
@@ -726,33 +750,35 @@ void uv__tcp_io(EV_P_ ev_io* watcher, int revents) {
* In order to determine if we've errored out or succeeded must call
* getsockopt.
*/
-static void uv__tcp_connect(uv_handle_t* handle) {
+static void uv__tcp_connect(uv_tcp_t* tcp) {
int error;
+ uv_req_t* req;
+ uv_connect_cb connect_cb;
socklen_t errorsize = sizeof(int);
- assert(handle->fd >= 0);
+ assert(tcp->fd >= 0);
- uv_req_t* req = handle->connect_req;
+ req = tcp->connect_req;
assert(req);
- if (handle->delayed_error) {
+ if (tcp->delayed_error) {
/* To smooth over the differences between unixes errors that
* were reported synchronously on the first connect can be delayed
* until the next tick--which is now.
*/
- error = handle->delayed_error;
- handle->delayed_error = 0;
+ error = tcp->delayed_error;
+ tcp->delayed_error = 0;
} else {
/* Normal situation: we need to get the socket error from the kernel. */
- getsockopt(handle->fd, SOL_SOCKET, SO_ERROR, &error, &errorsize);
+ getsockopt(tcp->fd, SOL_SOCKET, SO_ERROR, &error, &errorsize);
}
if (!error) {
- ev_io_start(EV_DEFAULT_ &handle->read_watcher);
+ ev_io_start(EV_DEFAULT_ &tcp->read_watcher);
/* Successful connection */
- handle->connect_req = NULL;
- uv_connect_cb connect_cb = req->cb;
+ tcp->connect_req = NULL;
+ connect_cb = (uv_connect_cb) req->cb;
if (connect_cb) {
connect_cb(req, 0);
}
@@ -762,31 +788,32 @@ static void uv__tcp_connect(uv_handle_t* handle) {
return;
} else {
/* Error */
- uv_err_t err = uv_err_new(handle, error);
+ uv_err_t err = uv_err_new((uv_handle_t*)tcp, error);
- handle->connect_req = NULL;
+ tcp->connect_req = NULL;
- uv_connect_cb connect_cb = req->cb;
+ connect_cb = (uv_connect_cb) req->cb;
if (connect_cb) {
connect_cb(req, -1);
}
-
- uv_close(handle);
}
}
-int uv_connect(uv_req_t* req, struct sockaddr* addr) {
- uv_handle_t* handle = req->handle;
+int uv_connect(uv_req_t* req, struct sockaddr_in addr) {
+ uv_tcp_t* tcp = (uv_tcp_t*)req->handle;
+ int addrsize;
+ int r;
- if (handle->fd <= 0) {
+ if (tcp->fd <= 0) {
int fd = socket(AF_INET, SOCK_STREAM, 0);
+
if (fd < 0) {
- uv_err_new(handle, errno);
+ uv_err_new((uv_handle_t*)tcp, errno);
return -1;
}
- if (uv_tcp_open(handle, fd)) {
+ if (uv_tcp_open(tcp, fd)) {
close(fd);
return -2;
}
@@ -795,22 +822,23 @@ int uv_connect(uv_req_t* req, struct sockaddr* addr) {
req->type = UV_CONNECT;
ngx_queue_init(&req->queue);
- if (handle->connect_req) {
- uv_err_new(handle, EALREADY);
+ if (tcp->connect_req) {
+ uv_err_new((uv_handle_t*)tcp, EALREADY);
return -1;
}
- if (handle->type != UV_TCP) {
- uv_err_new(handle, ENOTSOCK);
+ if (tcp->type != UV_TCP) {
+ uv_err_new((uv_handle_t*)tcp, ENOTSOCK);
return -1;
}
- handle->connect_req = req;
+ tcp->connect_req = req;
- int addrsize = sizeof(struct sockaddr_in);
+ addrsize = sizeof(struct sockaddr_in);
+ assert(addr.sin_family == AF_INET);
- int r = connect(handle->fd, addr, addrsize);
- handle->delayed_error = 0;
+ r = connect(tcp->fd, (struct sockaddr*)&addr, addrsize);
+ tcp->delayed_error = 0;
if (r != 0 && errno != EINPROGRESS) {
switch (errno) {
@@ -819,20 +847,20 @@ int uv_connect(uv_req_t* req, struct sockaddr* addr) {
* wait.
*/
case ECONNREFUSED:
- handle->delayed_error = errno;
+ tcp->delayed_error = errno;
break;
default:
- uv_err_new(handle, errno);
+ uv_err_new((uv_handle_t*)tcp, errno);
return -1;
}
}
- assert(handle->write_watcher.data == handle);
- ev_io_start(EV_DEFAULT_ &handle->write_watcher);
+ assert(tcp->write_watcher.data == tcp);
+ ev_io_start(EV_DEFAULT_ &tcp->write_watcher);
- if (handle->delayed_error) {
- ev_feed_event(EV_DEFAULT_ &handle->write_watcher, EV_WRITE);
+ if (tcp->delayed_error) {
+ ev_feed_event(EV_DEFAULT_ &tcp->write_watcher, EV_WRITE);
}
return 0;
@@ -855,8 +883,8 @@ static size_t uv__buf_count(uv_buf_t bufs[], int bufcnt) {
* This is not required for the uv_buf_t array.
*/
int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) {
- uv_handle_t* handle = req->handle;
- assert(handle->fd >= 0);
+ uv_tcp_t* tcp = (uv_tcp_t*)req->handle;
+ assert(tcp->fd >= 0);
ngx_queue_init(&req->queue);
req->type = UV_WRITE;
@@ -867,17 +895,17 @@ int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) {
req->bufcnt = bufcnt;
req->write_index = 0;
- handle->write_queue_size += uv__buf_count(bufs, bufcnt);
+ tcp->write_queue_size += uv__buf_count(bufs, bufcnt);
/* Append the request to write_queue. */
- ngx_queue_insert_tail(&handle->write_queue, &req->queue);
+ ngx_queue_insert_tail(&tcp->write_queue, &req->queue);
- assert(!ngx_queue_empty(&handle->write_queue));
- assert(handle->write_watcher.cb == uv__tcp_io);
- assert(handle->write_watcher.data == handle);
- assert(handle->write_watcher.fd == handle->fd);
+ assert(!ngx_queue_empty(&tcp->write_queue));
+ assert(tcp->write_watcher.cb == uv__tcp_io);
+ assert(tcp->write_watcher.data == tcp);
+ assert(tcp->write_watcher.fd == tcp->fd);
- ev_io_start(EV_DEFAULT_ &handle->write_watcher);
+ ev_io_start(EV_DEFAULT_ &tcp->write_watcher);
return 0;
}
@@ -903,44 +931,41 @@ int64_t uv_now() {
}
-int uv_read_start(uv_handle_t* handle, uv_read_cb cb) {
- /* The UV_READING flag is irrelevant of the state of the handle - it just
+int uv_read_start(uv_tcp_t* tcp, uv_alloc_cb alloc_cb, uv_read_cb read_cb) {
+ /* The UV_READING flag is irrelevant of the state of the tcp - it just
* expresses the desired state of the user.
*/
- uv_flag_set(handle, UV_READING);
+ uv_flag_set((uv_handle_t*)tcp, UV_READING);
/* TODO: try to do the read inline? */
- /* TODO: keep track of handle state. If we've gotten a EOF then we should
+ /* TODO: keep track of tcp state. If we've gotten a EOF then we should
* not start the IO watcher.
*/
- assert(handle->fd >= 0);
- handle->read_cb = cb;
+ assert(tcp->fd >= 0);
+ assert(alloc_cb);
+
+ tcp->read_cb = read_cb;
+ tcp->alloc_cb = alloc_cb;
/* These should have been set by uv_tcp_init. */
- assert(handle->read_watcher.data == handle);
- assert(handle->read_watcher.cb == uv__tcp_io);
+ assert(tcp->read_watcher.data == tcp);
+ assert(tcp->read_watcher.cb == uv__tcp_io);
- ev_io_start(EV_DEFAULT_UC_ &handle->read_watcher);
+ ev_io_start(EV_DEFAULT_UC_ &tcp->read_watcher);
return 0;
}
-int uv_read_stop(uv_handle_t* handle) {
- uv_flag_unset(handle, UV_READING);
+int uv_read_stop(uv_tcp_t* tcp) {
+ uv_flag_unset((uv_handle_t*)tcp, UV_READING);
- ev_io_stop(EV_DEFAULT_UC_ &handle->read_watcher);
- handle->read_cb = NULL;
+ ev_io_stop(EV_DEFAULT_UC_ &tcp->read_watcher);
+ tcp->read_cb = NULL;
+ tcp->alloc_cb = NULL;
return 0;
}
-void uv_free(uv_handle_t* handle) {
- free(handle);
- /* lists? */
- return;
-}
-
-
void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb) {
req->type = UV_UNKNOWN_REQ;
req->cb = cb;
@@ -950,30 +975,32 @@ void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb) {
static void uv__prepare(EV_P_ ev_prepare* w, int revents) {
- uv_handle_t* handle = (uv_handle_t*)(w->data);
+ uv_prepare_t* prepare = w->data;
- if (handle->prepare_cb) handle->prepare_cb(handle, 0);
+ if (prepare->prepare_cb) {
+ prepare->prepare_cb((uv_handle_t*)prepare, 0);
+ }
}
-int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
- uv__handle_init(handle, UV_PREPARE, close_cb, data);
+int uv_prepare_init(uv_prepare_t* prepare, uv_close_cb close_cb, void* data) {
+ uv__handle_init((uv_handle_t*)prepare, UV_PREPARE, close_cb, data);
- ev_prepare_init(&handle->prepare_watcher, uv__prepare);
- handle->prepare_watcher.data = handle;
+ ev_prepare_init(&prepare->prepare_watcher, uv__prepare);
+ prepare->prepare_watcher.data = prepare;
- handle->prepare_cb = NULL;
+ prepare->prepare_cb = NULL;
return 0;
}
-int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb) {
- int was_active = ev_is_active(&handle->prepare_watcher);
+int uv_prepare_start(uv_prepare_t* prepare, uv_loop_cb cb) {
+ int was_active = ev_is_active(&prepare->prepare_watcher);
- handle->prepare_cb = cb;
+ prepare->prepare_cb = cb;
- ev_prepare_start(EV_DEFAULT_UC_ &handle->prepare_watcher);
+ ev_prepare_start(EV_DEFAULT_UC_ &prepare->prepare_watcher);
if (!was_active) {
ev_unref(EV_DEFAULT_UC);
@@ -983,10 +1010,10 @@ int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb) {
}
-int uv_prepare_stop(uv_handle_t* handle) {
- int was_active = ev_is_active(&handle->prepare_watcher);
+int uv_prepare_stop(uv_prepare_t* prepare) {
+ int was_active = ev_is_active(&prepare->prepare_watcher);
- ev_prepare_stop(EV_DEFAULT_UC_ &handle->prepare_watcher);
+ ev_prepare_stop(EV_DEFAULT_UC_ &prepare->prepare_watcher);
if (was_active) {
ev_ref(EV_DEFAULT_UC);
@@ -997,30 +1024,32 @@ int uv_prepare_stop(uv_handle_t* handle) {
static void uv__check(EV_P_ ev_check* w, int revents) {
- uv_handle_t* handle = (uv_handle_t*)(w->data);
+ uv_check_t* check = w->data;
- if (handle->check_cb) handle->check_cb(handle, 0);
+ if (check->check_cb) {
+ check->check_cb((uv_handle_t*)check, 0);
+ }
}
-int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
- uv__handle_init(handle, UV_CHECK, close_cb, data);
+int uv_check_init(uv_check_t* check, uv_close_cb close_cb, void* data) {
+ uv__handle_init((uv_handle_t*)check, UV_CHECK, close_cb, data);
- ev_check_init(&handle->check_watcher, uv__check);
- handle->check_watcher.data = handle;
+ ev_check_init(&check->check_watcher, uv__check);
+ check->check_watcher.data = check;
- handle->check_cb = NULL;
+ check->check_cb = NULL;
return 0;
}
-int uv_check_start(uv_handle_t* handle, uv_loop_cb cb) {
- int was_active = ev_is_active(&handle->prepare_watcher);
+int uv_check_start(uv_check_t* check, uv_loop_cb cb) {
+ int was_active = ev_is_active(&check->check_watcher);
- handle->check_cb = cb;
+ check->check_cb = cb;
- ev_check_start(EV_DEFAULT_UC_ &handle->check_watcher);
+ ev_check_start(EV_DEFAULT_UC_ &check->check_watcher);
if (!was_active) {
ev_unref(EV_DEFAULT_UC);
@@ -1030,10 +1059,10 @@ int uv_check_start(uv_handle_t* handle, uv_loop_cb cb) {
}
-int uv_check_stop(uv_handle_t* handle) {
- int was_active = ev_is_active(&handle->check_watcher);
+int uv_check_stop(uv_check_t* check) {
+ int was_active = ev_is_active(&check->check_watcher);
- ev_check_stop(EV_DEFAULT_UC_ &handle->check_watcher);
+ ev_check_stop(EV_DEFAULT_UC_ &check->check_watcher);
if (was_active) {
ev_ref(EV_DEFAULT_UC);
@@ -1044,30 +1073,32 @@ int uv_check_stop(uv_handle_t* handle) {
static void uv__idle(EV_P_ ev_idle* w, int revents) {
- uv_handle_t* handle = (uv_handle_t*)(w->data);
+ uv_idle_t* idle = (uv_idle_t*)(w->data);
- if (handle->idle_cb) handle->idle_cb(handle, 0);
+ if (idle->idle_cb) {
+ idle->idle_cb((uv_handle_t*)idle, 0);
+ }
}
-int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
- uv__handle_init(handle, UV_IDLE, close_cb, data);
+int uv_idle_init(uv_idle_t* idle, uv_close_cb close_cb, void* data) {
+ uv__handle_init((uv_handle_t*)idle, UV_IDLE, close_cb, data);
- ev_idle_init(&handle->idle_watcher, uv__idle);
- handle->idle_watcher.data = handle;
+ ev_idle_init(&idle->idle_watcher, uv__idle);
+ idle->idle_watcher.data = idle;
- handle->idle_cb = NULL;
+ idle->idle_cb = NULL;
return 0;
}
-int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb) {
- int was_active = ev_is_active(&handle->idle_watcher);
+int uv_idle_start(uv_idle_t* idle, uv_loop_cb cb) {
+ int was_active = ev_is_active(&idle->idle_watcher);
- handle->idle_cb = cb;
- ev_idle_start(EV_DEFAULT_UC_ &handle->idle_watcher);
+ idle->idle_cb = cb;
+ ev_idle_start(EV_DEFAULT_UC_ &idle->idle_watcher);
if (!was_active) {
ev_unref(EV_DEFAULT_UC);
@@ -1077,10 +1108,10 @@ int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb) {
}
-int uv_idle_stop(uv_handle_t* handle) {
- int was_active = ev_is_active(&handle->idle_watcher);
+int uv_idle_stop(uv_idle_t* idle) {
+ int was_active = ev_is_active(&idle->idle_watcher);
- ev_idle_stop(EV_DEFAULT_UC_ &handle->idle_watcher);
+ ev_idle_stop(EV_DEFAULT_UC_ &idle->idle_watcher);
if (was_active) {
ev_ref(EV_DEFAULT_UC);
@@ -1093,16 +1124,16 @@ int uv_idle_stop(uv_handle_t* handle) {
int uv_is_active(uv_handle_t* handle) {
switch (handle->type) {
case UV_TIMER:
- return ev_is_active(&handle->timer_watcher);
+ return ev_is_active(&((uv_timer_t*)handle)->timer_watcher);
case UV_PREPARE:
- return ev_is_active(&handle->prepare_watcher);
+ return ev_is_active(&((uv_prepare_t*)handle)->prepare_watcher);
case UV_CHECK:
- return ev_is_active(&handle->check_watcher);
+ return ev_is_active(&((uv_check_t*)handle)->check_watcher);
case UV_IDLE:
- return ev_is_active(&handle->idle_watcher);
+ return ev_is_active(&((uv_idle_t*)handle)->idle_watcher);
default:
return 1;
@@ -1111,96 +1142,156 @@ int uv_is_active(uv_handle_t* handle) {
static void uv__async(EV_P_ ev_async* w, int revents) {
- uv_handle_t* handle = (uv_handle_t*)(w->data);
+ uv_async_t* async = w->data;
- if (handle->async_cb) handle->async_cb(handle, 0);
+ if (async->async_cb) {
+ async->async_cb((uv_handle_t*)async, 0);
+ }
}
-int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
+int uv_async_init(uv_async_t* async, uv_async_cb async_cb,
uv_close_cb close_cb, void* data) {
- uv__handle_init(handle, UV_ASYNC, close_cb, data);
+ uv__handle_init((uv_handle_t*)async, UV_ASYNC, close_cb, data);
- ev_async_init(&handle->async_watcher, uv__async);
- handle->async_watcher.data = handle;
+ ev_async_init(&async->async_watcher, uv__async);
+ async->async_watcher.data = async;
- handle->async_cb = async_cb;
+ async->async_cb = async_cb;
/* Note: This does not have symmetry with the other libev wrappers. */
- ev_async_start(EV_DEFAULT_UC_ &handle->async_watcher);
+ ev_async_start(EV_DEFAULT_UC_ &async->async_watcher);
ev_unref(EV_DEFAULT_UC);
return 0;
}
-int uv_async_send(uv_handle_t* handle) {
- ev_async_send(EV_DEFAULT_UC_ &handle->async_watcher);
+int uv_async_send(uv_async_t* async) {
+  ev_async_send(EV_DEFAULT_UC_ &async->async_watcher);
+  return 0;
 }
static void uv__timer_cb(EV_P_ ev_timer* w, int revents) {
- uv_handle_t* handle = (uv_handle_t*)(w->data);
+ uv_timer_t* timer = w->data;
if (!ev_is_active(w)) {
ev_ref(EV_DEFAULT_UC);
}
- if (handle->timer_cb) handle->timer_cb(handle, 0);
+ if (timer->timer_cb) {
+ timer->timer_cb((uv_handle_t*)timer, 0);
+ }
}
-int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
- uv__handle_init(handle, UV_TIMER, close_cb, data);
+int uv_timer_init(uv_timer_t* timer, uv_close_cb close_cb, void* data) {
+ uv__handle_init((uv_handle_t*)timer, UV_TIMER, close_cb, data);
- ev_init(&handle->timer_watcher, uv__timer_cb);
- handle->timer_watcher.data = handle;
+ ev_init(&timer->timer_watcher, uv__timer_cb);
+ timer->timer_watcher.data = timer;
return 0;
}
-int uv_timer_start(uv_handle_t* handle, uv_loop_cb cb, int64_t timeout,
+int uv_timer_start(uv_timer_t* timer, uv_loop_cb cb, int64_t timeout,
int64_t repeat) {
- if (ev_is_active(&handle->timer_watcher)) {
+ if (ev_is_active(&timer->timer_watcher)) {
return -1;
}
- handle->timer_cb = cb;
- ev_timer_set(&handle->timer_watcher, timeout / 1000.0, repeat / 1000.0);
- ev_timer_start(EV_DEFAULT_UC_ &handle->timer_watcher);
+ timer->timer_cb = cb;
+ ev_timer_set(&timer->timer_watcher, timeout / 1000.0, repeat / 1000.0);
+ ev_timer_start(EV_DEFAULT_UC_ &timer->timer_watcher);
ev_unref(EV_DEFAULT_UC);
return 0;
}
-int uv_timer_stop(uv_handle_t* handle) {
- if (ev_is_active(&handle->timer_watcher)) {
+int uv_timer_stop(uv_timer_t* timer) {
+ if (ev_is_active(&timer->timer_watcher)) {
ev_ref(EV_DEFAULT_UC);
}
- ev_timer_stop(EV_DEFAULT_UC_ &handle->timer_watcher);
+ ev_timer_stop(EV_DEFAULT_UC_ &timer->timer_watcher);
return 0;
}
-int uv_timer_again(uv_handle_t* handle) {
- if (!ev_is_active(&handle->timer_watcher)) {
- uv_err_new(handle, EINVAL);
+int uv_timer_again(uv_timer_t* timer) {
+ if (!ev_is_active(&timer->timer_watcher)) {
+ uv_err_new((uv_handle_t*)timer, EINVAL);
return -1;
}
- ev_timer_again(EV_DEFAULT_UC_ &handle->timer_watcher);
+ ev_timer_again(EV_DEFAULT_UC_ &timer->timer_watcher);
return 0;
}
-void uv_timer_set_repeat(uv_handle_t* handle, int64_t repeat) {
- assert(handle->type == UV_TIMER);
- handle->timer_watcher.repeat = repeat / 1000.0;
+void uv_timer_set_repeat(uv_timer_t* timer, int64_t repeat) {
+ assert(timer->type == UV_TIMER);
+ timer->timer_watcher.repeat = repeat / 1000.0;
+}
+
+int64_t uv_timer_get_repeat(uv_timer_t* timer) {
+ assert(timer->type == UV_TIMER);
+ return (int64_t)(1000 * timer->timer_watcher.repeat);
}
-int64_t uv_timer_get_repeat(uv_handle_t* handle) {
- assert(handle->type == UV_TIMER);
- return (int64_t)(1000 * handle->timer_watcher.repeat);
+
+int uv_get_exepath(char* buffer, size_t* size) {
+ uint32_t usize;
+ int result;
+ char* path;
+ char* fullpath;
+
+ if (!buffer || !size) {
+ return -1;
+ }
+
+#if defined(__APPLE__)
+ usize = *size;
+ result = _NSGetExecutablePath(buffer, &usize);
+ if (result) return result;
+
+ path = (char*)malloc(2 * PATH_MAX);
+ fullpath = realpath(buffer, path);
+
+ if (fullpath == NULL) {
+ free(path);
+ return -1;
+ }
+
+ strncpy(buffer, fullpath, *size);
+ free(fullpath);
+ *size = strlen(buffer);
+ return 0;
+#elif defined(__linux__)
+ *size = readlink("/proc/self/exe", buffer, *size - 1);
+ if (*size <= 0) return -1;
+ buffer[*size] = '\0';
+ return 0;
+#elif defined(__FreeBSD__)
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PATHNAME;
+ mib[3] = -1;
+
+ size_t cb = *size;
+ if (sysctl(mib, 4, buffer, &cb, NULL, 0) < 0) {
+ *size = 0;
+ return -1;
+ }
+ *size = strlen(buffer);
+
+ return 0;
+#else
+ assert(0 && "implement me");
+ /* Need to return argv[0] */
+#endif
}
+
diff --git a/deps/uv/uv-unix.h b/deps/uv/uv-unix.h
index 6301ac3f3d..9d68901334 100644
--- a/deps/uv/uv-unix.h
+++ b/deps/uv/uv-unix.h
@@ -38,7 +38,7 @@ typedef struct {
} uv_buf_t;
-#define uv_req_private_fields \
+#define UV_REQ_PRIVATE_FIELDS \
int write_index; \
ev_timer timer; \
ngx_queue_t queue; \
@@ -47,33 +47,52 @@ typedef struct {
/* TODO: union or classes please! */
-#define uv_handle_private_fields \
+#define UV_HANDLE_PRIVATE_FIELDS \
int fd; \
int flags; \
- ev_idle next_watcher; \
-/* UV_TCP */ \
+ ev_idle next_watcher;
+
+
+/* UV_TCP */
+#define UV_TCP_PRIVATE_FIELDS \
int delayed_error; \
uv_read_cb read_cb; \
- uv_accept_cb accept_cb; \
+ uv_alloc_cb alloc_cb; \
+ uv_connection_cb connection_cb; \
int accepted_fd; \
uv_req_t *connect_req; \
uv_req_t *shutdown_req; \
ev_io read_watcher; \
ev_io write_watcher; \
- ngx_queue_t write_queue; \
+ ngx_queue_t write_queue;
+
+
/* UV_PREPARE */ \
+#define UV_PREPARE_PRIVATE_FIELDS \
ev_prepare prepare_watcher; \
- uv_loop_cb prepare_cb; \
-/* UV_CHECK */ \
+ uv_loop_cb prepare_cb;
+
+
+/* UV_CHECK */
+#define UV_CHECK_PRIVATE_FIELDS \
ev_check check_watcher; \
- uv_loop_cb check_cb; \
-/* UV_IDLE */ \
+ uv_loop_cb check_cb;
+
+
+/* UV_IDLE */
+#define UV_IDLE_PRIVATE_FIELDS \
ev_idle idle_watcher; \
- uv_loop_cb idle_cb; \
-/* UV_ASYNC */ \
+ uv_loop_cb idle_cb;
+
+
+/* UV_ASYNC */
+#define UV_ASYNC_PRIVATE_FIELDS \
ev_async async_watcher; \
- uv_loop_cb async_cb; \
-/* UV_TIMER */ \
+ uv_loop_cb async_cb;
+
+
+/* UV_TIMER */
+#define UV_TIMER_PRIVATE_FIELDS \
ev_timer timer_watcher; \
uv_loop_cb timer_cb;
diff --git a/deps/uv/uv-win.c b/deps/uv/uv-win.c
index c25bb7063a..38a4421842 100644
--- a/deps/uv/uv-win.c
+++ b/deps/uv/uv-win.c
@@ -141,12 +141,12 @@ static LPFN_TRANSMITFILE pTransmitFile;
/* Binary tree used to keep the list of timers sorted. */
-static int uv_timer_compare(uv_handle_t* handle1, uv_handle_t* handle2);
-RB_HEAD(uv_timer_s, uv_handle_s);
-RB_PROTOTYPE_STATIC(uv_timer_s, uv_handle_s, tree_entry, uv_timer_compare);
+static int uv_timer_compare(uv_timer_t* handle1, uv_timer_t* handle2);
+RB_HEAD(uv_timer_tree_s, uv_timer_s);
+RB_PROTOTYPE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare);
/* The head of the timers tree */
-static struct uv_timer_s uv_timers_ = RB_INITIALIZER(uv_timers_);
+static struct uv_timer_tree_s uv_timers_ = RB_INITIALIZER(uv_timers_);
/* Lists of active uv_prepare / uv_check / uv_idle watchers */
@@ -164,6 +164,11 @@ static uv_handle_t* uv_next_loop_handle_ = NULL;
static uv_handle_t* uv_endgame_handles_ = NULL;
+/* Tail of a single-linked circular queue of pending reqs. If the queue is */
+/* empty, tail_ is NULL. If there is only one item, tail_->next_req == tail_ */
+static uv_req_t* uv_pending_reqs_tail_ = NULL;
+
+
/* The current time according to the event loop. in msecs. */
static int64_t uv_now_ = 0;
static int64_t uv_ticks_per_msec_ = 0;
@@ -182,9 +187,6 @@ static uv_err_t uv_last_error_ = { UV_OK, ERROR_SUCCESS };
/* Error message string */
static char* uv_err_str_ = NULL;
-/* Global alloc function */
-uv_alloc_cb uv_alloc_ = NULL;
-
/* Reference count that keeps the event loop alive */
static int uv_refs_ = 0;
@@ -345,7 +347,7 @@ static void uv_get_extension_function(SOCKET socket, GUID guid,
}
-void uv_init(uv_alloc_cb alloc_cb) {
+void uv_init() {
const GUID wsaid_connectex = WSAID_CONNECTEX;
const GUID wsaid_acceptex = WSAID_ACCEPTEX;
const GUID wsaid_getacceptexsockaddrs = WSAID_GETACCEPTEXSOCKADDRS;
@@ -357,8 +359,6 @@ void uv_init(uv_alloc_cb alloc_cb) {
LARGE_INTEGER timer_frequency;
SOCKET dummy;
- uv_alloc_ = alloc_cb;
-
/* Initialize winsock */
errorno = WSAStartup(MAKEWORD(2, 2), &wsa_data);
if (errorno != 0) {
@@ -422,7 +422,40 @@ static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) {
}
-static int uv_tcp_init_socket(uv_handle_t* handle, uv_close_cb close_cb,
+static void uv_insert_pending_req(uv_req_t* req) {
+ req->next_req = NULL;
+ if (uv_pending_reqs_tail_) {
+    req->next_req = uv_pending_reqs_tail_->next_req;
+    uv_pending_reqs_tail_->next_req = req;
+    uv_pending_reqs_tail_ = req;
+ } else {
+ req->next_req = req;
+ uv_pending_reqs_tail_ = req;
+ }
+}
+
+
+static uv_req_t* uv_remove_pending_req() {
+ uv_req_t* req;
+
+ if (uv_pending_reqs_tail_) {
+ req = uv_pending_reqs_tail_->next_req;
+
+ if (req == uv_pending_reqs_tail_) {
+ uv_pending_reqs_tail_ = NULL;
+ } else {
+ uv_pending_reqs_tail_->next_req = req->next_req;
+ }
+
+ return req;
+
+ } else {
+ /* queue empty */
+ return NULL;
+ }
+}
+
+
+static int uv_tcp_init_socket(uv_tcp_t* handle, uv_close_cb close_cb,
void* data, SOCKET socket) {
DWORD yes = 1;
@@ -464,15 +497,14 @@ static int uv_tcp_init_socket(uv_handle_t* handle, uv_close_cb close_cb,
}
-static void uv_tcp_init_connection(uv_handle_t* handle) {
+static void uv_tcp_init_connection(uv_tcp_t* handle) {
handle->flags |= UV_HANDLE_CONNECTION;
handle->write_reqs_pending = 0;
- uv_req_init(&(handle->read_req), handle, NULL);
+ uv_req_init(&(handle->read_req), (uv_handle_t*)handle, NULL);
}
-int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb,
- void* data) {
+int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data) {
SOCKET sock;
sock = socket(AF_INET, SOCK_STREAM, 0);
@@ -490,7 +522,7 @@ int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb,
}
-static void uv_tcp_endgame(uv_handle_t* handle) {
+static void uv_tcp_endgame(uv_tcp_t* handle) {
uv_err_t err;
int status;
@@ -515,15 +547,6 @@ static void uv_tcp_endgame(uv_handle_t* handle) {
handle->reqs_pending--;
}
- if (handle->flags & UV_HANDLE_EOF &&
- handle->flags & UV_HANDLE_SHUT &&
- !(handle->flags & UV_HANDLE_CLOSING)) {
- /* Because uv_close will add the handle to the endgame_handles list, */
- /* return here and call the close cb the next time. */
- uv_close(handle);
- return;
- }
-
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
@@ -531,7 +554,7 @@ static void uv_tcp_endgame(uv_handle_t* handle) {
if (handle->close_cb) {
uv_last_error_ = handle->error;
- handle->close_cb(handle, handle->error.code == UV_OK ? 0 : 1);
+ handle->close_cb((uv_handle_t*)handle, handle->error.code == UV_OK ? 0 : 1);
}
uv_refs_--;
@@ -539,13 +562,13 @@ static void uv_tcp_endgame(uv_handle_t* handle) {
}
-static void uv_timer_endgame(uv_handle_t* handle) {
+static void uv_timer_endgame(uv_timer_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
- handle->close_cb(handle, 0);
+ handle->close_cb((uv_handle_t*)handle, 0);
}
uv_refs_--;
@@ -567,14 +590,14 @@ static void uv_loop_endgame(uv_handle_t* handle) {
}
-static void uv_async_endgame(uv_handle_t* handle) {
+static void uv_async_endgame(uv_async_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING &&
!handle->async_sent) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
- handle->close_cb(handle, 0);
+ handle->close_cb((uv_handle_t*)handle, 0);
}
uv_refs_--;
@@ -582,7 +605,7 @@ static void uv_async_endgame(uv_handle_t* handle) {
}
-static void uv_call_endgames() {
+static void uv_process_endgames() {
uv_handle_t* handle;
while (uv_endgame_handles_) {
@@ -593,11 +616,11 @@ static void uv_call_endgames() {
switch (handle->type) {
case UV_TCP:
- uv_tcp_endgame(handle);
+ uv_tcp_endgame((uv_tcp_t*)handle);
break;
case UV_TIMER:
- uv_timer_endgame(handle);
+ uv_timer_endgame((uv_timer_t*)handle);
break;
case UV_PREPARE:
@@ -607,7 +630,7 @@ static void uv_call_endgames() {
break;
case UV_ASYNC:
- uv_async_endgame(handle);
+ uv_async_endgame((uv_async_t*)handle);
break;
default:
@@ -629,6 +652,8 @@ static void uv_want_endgame(uv_handle_t* handle) {
static int uv_close_error(uv_handle_t* handle, uv_err_t e) {
+ uv_tcp_t* tcp;
+
if (handle->flags & UV_HANDLE_CLOSING) {
return 0;
}
@@ -639,34 +664,35 @@ static int uv_close_error(uv_handle_t* handle, uv_err_t e) {
/* Handle-specific close actions */
switch (handle->type) {
case UV_TCP:
- closesocket(handle->socket);
- if (handle->reqs_pending == 0) {
+ tcp = (uv_tcp_t*)handle;
+ closesocket(tcp->socket);
+ if (tcp->reqs_pending == 0) {
uv_want_endgame(handle);
}
return 0;
case UV_TIMER:
- uv_timer_stop(handle);
+ uv_timer_stop((uv_timer_t*)handle);
uv_want_endgame(handle);
return 0;
case UV_PREPARE:
- uv_prepare_stop(handle);
+ uv_prepare_stop((uv_prepare_t*)handle);
uv_want_endgame(handle);
return 0;
case UV_CHECK:
- uv_check_stop(handle);
+ uv_check_stop((uv_check_t*)handle);
uv_want_endgame(handle);
return 0;
case UV_IDLE:
- uv_idle_stop(handle);
+ uv_idle_stop((uv_idle_t*)handle);
uv_want_endgame(handle);
return 0;
case UV_ASYNC:
- if (!handle->async_sent) {
+ if (!((uv_async_t*)handle)->async_sent) {
uv_want_endgame(handle);
}
return 0;
@@ -695,20 +721,19 @@ struct sockaddr_in uv_ip4_addr(char* ip, int port) {
}
-int uv_bind(uv_handle_t* handle, struct sockaddr* addr) {
- int addrsize;
+int uv_bind(uv_tcp_t* handle, struct sockaddr_in addr) {
DWORD err;
+ int r;
+ int addrsize = sizeof(struct sockaddr_in);
- if (addr->sa_family == AF_INET) {
- addrsize = sizeof(struct sockaddr_in);
- } else if (addr->sa_family == AF_INET6) {
- addrsize = sizeof(struct sockaddr_in6);
- } else {
+ if (addr.sin_family != AF_INET) {
uv_set_sys_error(WSAEFAULT);
return -1;
}
- if (bind(handle->socket, addr, addrsize) == SOCKET_ERROR) {
+ r = bind(handle->socket, (struct sockaddr*) &addr, addrsize);
+
+ if (r == SOCKET_ERROR) {
err = WSAGetLastError();
if (err == WSAEADDRINUSE) {
/* Some errors are not to be reported until connect() or listen() */
@@ -726,7 +751,7 @@ int uv_bind(uv_handle_t* handle, struct sockaddr* addr) {
}
-static void uv_queue_accept(uv_handle_t* handle) {
+static void uv_queue_accept(uv_tcp_t* handle) {
uv_req_t* req;
BOOL success;
DWORD bytes;
@@ -735,17 +760,21 @@ static void uv_queue_accept(uv_handle_t* handle) {
assert(handle->flags & UV_HANDLE_LISTENING);
assert(handle->accept_socket == INVALID_SOCKET);
+ /* Prepare the uv_req structure. */
+ req = &handle->accept_req;
+ assert(!(req->flags & UV_REQ_PENDING));
+ req->type = UV_ACCEPT;
+ req->flags |= UV_REQ_PENDING;
+
+ /* Open a socket for the accepted connection. */
accept_socket = socket(AF_INET, SOCK_STREAM, 0);
if (accept_socket == INVALID_SOCKET) {
- uv_close_error(handle, uv_new_sys_error(WSAGetLastError()));
+ req->error = uv_new_sys_error(WSAGetLastError());
+ uv_insert_pending_req(req);
return;
}
- /* Prepare the uv_req and OVERLAPPED structures. */
- req = &handle->accept_req;
- assert(!(req->flags & UV_REQ_PENDING));
- req->type = UV_ACCEPT;
- req->flags |= UV_REQ_PENDING;
+ /* Prepare the overlapped structure. */
memset(&(req->overlapped), 0, sizeof(req->overlapped));
success = pAcceptEx(handle->socket,
@@ -758,11 +787,11 @@ static void uv_queue_accept(uv_handle_t* handle) {
&req->overlapped);
if (!success && WSAGetLastError() != ERROR_IO_PENDING) {
- uv_set_sys_error(WSAGetLastError());
- /* destroy the preallocated client handle */
+ /* Make this req pending reporting an error. */
+ req->error = uv_new_sys_error(WSAGetLastError());
+ uv_insert_pending_req(req);
+ /* Destroy the preallocated client socket. */
closesocket(accept_socket);
- /* destroy ourselves */
- uv_close_error(handle, uv_last_error_);
return;
}
@@ -773,8 +802,8 @@ static void uv_queue_accept(uv_handle_t* handle) {
}
-static void uv_queue_read(uv_handle_t* handle) {
- uv_req_t *req;
+static void uv_queue_read(uv_tcp_t* handle) {
+ uv_req_t* req;
uv_buf_t buf;
int result;
DWORD bytes, flags;
@@ -798,8 +827,9 @@ static void uv_queue_read(uv_handle_t* handle) {
&req->overlapped,
NULL);
if (result != 0 && WSAGetLastError() != ERROR_IO_PENDING) {
- uv_set_sys_error(WSAGetLastError());
- uv_close_error(handle, uv_last_error_);
+ /* Make this req pending reporting an error. */
+ req->error = uv_new_sys_error(WSAGetLastError());
+ uv_insert_pending_req(req);
return;
}
@@ -808,7 +838,7 @@ static void uv_queue_read(uv_handle_t* handle) {
}
-int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) {
+int uv_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
assert(backlog > 0);
if (handle->flags & UV_HANDLE_BIND_ERROR) {
@@ -829,16 +859,16 @@ int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) {
}
handle->flags |= UV_HANDLE_LISTENING;
- handle->accept_cb = cb;
+ handle->connection_cb = cb;
- uv_req_init(&(handle->accept_req), handle, NULL);
+ uv_req_init(&(handle->accept_req), (uv_handle_t*)handle, NULL);
uv_queue_accept(handle);
return 0;
}
-int uv_accept(uv_handle_t* server, uv_handle_t* client,
+int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
uv_close_cb close_cb, void* data) {
int rv = 0;
@@ -848,7 +878,6 @@ int uv_accept(uv_handle_t* server, uv_handle_t* client,
}
if (uv_tcp_init_socket(client, close_cb, data, server->accept_socket) == -1) {
- uv_fatal_error(uv_last_error_.sys_errno_, "init");
closesocket(server->accept_socket);
rv = -1;
}
@@ -865,7 +894,7 @@ int uv_accept(uv_handle_t* server, uv_handle_t* client,
}
-int uv_read_start(uv_handle_t* handle, uv_read_cb cb) {
+int uv_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb, uv_read_cb read_cb) {
if (!(handle->flags & UV_HANDLE_CONNECTION)) {
uv_set_sys_error(WSAEINVAL);
return -1;
@@ -882,7 +911,8 @@ int uv_read_start(uv_handle_t* handle, uv_read_cb cb) {
}
handle->flags |= UV_HANDLE_READING;
- handle->read_cb = cb;
+ handle->read_cb = read_cb;
+ handle->alloc_cb = alloc_cb;
/* If reading was stopped and then started again, there could stell be a */
/* read request pending. */
@@ -893,18 +923,18 @@ int uv_read_start(uv_handle_t* handle, uv_read_cb cb) {
}
-int uv_read_stop(uv_handle_t* handle) {
+int uv_read_stop(uv_tcp_t* handle) {
handle->flags &= ~UV_HANDLE_READING;
return 0;
}
-int uv_connect(uv_req_t* req, struct sockaddr* addr) {
- int addrsize;
+int uv_connect(uv_req_t* req, struct sockaddr_in addr) {
+ int addrsize = sizeof(struct sockaddr_in);
BOOL success;
DWORD bytes;
- uv_handle_t* handle = req->handle;
+ uv_tcp_t* handle = (uv_tcp_t*)req->handle;
assert(!(req->flags & UV_REQ_PENDING));
@@ -913,25 +943,20 @@ int uv_connect(uv_req_t* req, struct sockaddr* addr) {
return -1;
}
- if (addr->sa_family == AF_INET) {
- addrsize = sizeof(struct sockaddr_in);
- if (!(handle->flags & UV_HANDLE_BOUND) &&
- uv_bind(handle, (struct sockaddr*)&uv_addr_ip4_any_) < 0)
- return -1;
- } else if (addr->sa_family == AF_INET6) {
- addrsize = sizeof(struct sockaddr_in6);
- assert(0);
- return -1;
- } else {
- assert(0);
+ if (addr.sin_family != AF_INET) {
+ uv_set_sys_error(WSAEFAULT);
return -1;
}
+ if (!(handle->flags & UV_HANDLE_BOUND) &&
+ uv_bind(handle, uv_addr_ip4_any_) < 0)
+ return -1;
+
memset(&req->overlapped, 0, sizeof(req->overlapped));
req->type = UV_CONNECT;
success = pConnectEx(handle->socket,
- addr,
+ (struct sockaddr*)&addr,
addrsize,
NULL,
0,
@@ -965,7 +990,7 @@ static size_t uv_count_bufs(uv_buf_t bufs[], int count) {
int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) {
int result;
DWORD bytes, err;
- uv_handle_t* handle = req->handle;
+ uv_tcp_t* handle = (uv_tcp_t*) req->handle;
assert(!(req->flags & UV_REQ_PENDING));
@@ -992,17 +1017,17 @@ int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) {
if (result != 0) {
err = WSAGetLastError();
if (err != WSA_IO_PENDING) {
- /* Send faild due to an error */
+ /* Send failed due to an error. */
uv_set_sys_error(WSAGetLastError());
return -1;
}
}
if (result == 0) {
- /* Request completed immediately */
+ /* Request completed immediately. */
req->queued_bytes = 0;
} else {
- /* Request queued by the kernel */
+ /* Request queued by the kernel. */
req->queued_bytes = uv_count_bufs(bufs, bufcnt);
handle->write_queue_size += req->queued_bytes;
}
@@ -1016,7 +1041,7 @@ int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) {
int uv_shutdown(uv_req_t* req) {
- uv_handle_t* handle = req->handle;
+ uv_tcp_t* handle = (uv_tcp_t*) req->handle;
int status = 0;
if (!(req->handle->flags & UV_HANDLE_CONNECTION)) {
@@ -1036,14 +1061,13 @@ int uv_shutdown(uv_req_t* req) {
handle->shutdown_req = req;
handle->reqs_pending++;
- uv_want_endgame(handle);
+ uv_want_endgame((uv_handle_t*)handle);
return 0;
}
-static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
- BOOL success;
+static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
DWORD bytes, flags, err;
uv_buf_t buf;
@@ -1054,31 +1078,33 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
switch (req->type) {
case UV_WRITE:
- success = GetOverlappedResult(handle->handle, &req->overlapped, &bytes, FALSE);
handle->write_queue_size -= req->queued_bytes;
- if (!success) {
- uv_set_sys_error(GetLastError());
- uv_close_error(handle, uv_last_error_);
- }
if (req->cb) {
- ((uv_write_cb)req->cb)(req, success ? 0 : -1);
+ uv_last_error_ = req->error;
+ ((uv_write_cb)req->cb)(req, uv_last_error_.code == UV_OK ? 0 : -1);
}
handle->write_reqs_pending--;
- if (success &&
- handle->write_reqs_pending == 0 &&
+ if (handle->write_reqs_pending == 0 &&
handle->flags & UV_HANDLE_SHUTTING) {
- uv_want_endgame(handle);
+ uv_want_endgame((uv_handle_t*)handle);
}
break;
case UV_READ:
- success = GetOverlappedResult(handle->handle, &req->overlapped, &bytes, FALSE);
- if (!success) {
- uv_set_sys_error(GetLastError());
- uv_close_error(handle, uv_last_error_);
+ if (req->error.code != UV_OK) {
+ /* An error occurred doing the 0-read. */
+ /* Stop reading and report error. */
+ handle->flags &= ~UV_HANDLE_READING;
+ uv_last_error_ = req->error;
+ buf.base = 0;
+ buf.len = 0;
+ ((uv_read_cb)handle->read_cb)(handle, -1, buf);
+ break;
}
+
+ /* Do nonblocking reads until the buffer is empty */
while (handle->flags & UV_HANDLE_READING) {
- buf = uv_alloc_(handle, 65536);
+ buf = handle->alloc_cb(handle, 65536);
assert(buf.len > 0);
flags = 0;
if (WSARecv(handle->socket,
@@ -1102,47 +1128,57 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
uv_last_error_.code = UV_EOF;
uv_last_error_.sys_errno_ = ERROR_SUCCESS;
((uv_read_cb)handle->read_cb)(handle, -1, buf);
- uv_want_endgame(handle);
break;
}
} else {
err = WSAGetLastError();
if (err == WSAEWOULDBLOCK) {
- /* 0-byte read */
+ /* Read buffer was completely empty, report a 0-byte read. */
uv_set_sys_error(WSAEWOULDBLOCK);
((uv_read_cb)handle->read_cb)(handle, 0, buf);
} else {
/* Ouch! serious error. */
uv_set_sys_error(err);
- uv_close_error(handle, uv_last_error_);
+ ((uv_read_cb)handle->read_cb)(handle, -1, buf);
}
break;
}
}
- /* Post another 0-read if still reading and not closing */
+ /* Post another 0-read if still reading and not closing. */
if (!(handle->flags & UV_HANDLE_CLOSING) &&
- !(handle->flags & UV_HANDLE_EOF) &&
handle->flags & UV_HANDLE_READING) {
uv_queue_read(handle);
}
break;
case UV_ACCEPT:
- assert(handle->accept_socket != INVALID_SOCKET);
-
- success = GetOverlappedResult(handle->handle, &req->overlapped, &bytes, FALSE);
- success = success && (setsockopt(handle->accept_socket,
- SOL_SOCKET,
- SO_UPDATE_ACCEPT_CONTEXT,
- (char*)&handle->socket,
- sizeof(handle->socket)) == 0);
-
- if (success) {
- if (handle->accept_cb) {
- ((uv_accept_cb)handle->accept_cb)(handle);
+  /* If handle->accept_socket is not a valid socket, then */
+ /* uv_queue_accept must have failed. This is a serious error. We stop */
+ /* accepting connections and report this error to the connection */
+ /* callback. */
+ if (handle->accept_socket == INVALID_SOCKET) {
+ handle->flags &= ~UV_HANDLE_LISTENING;
+ if (handle->connection_cb) {
+ uv_last_error_ = req->error;
+ ((uv_connection_cb)handle->connection_cb)(handle, -1);
+ }
+ break;
+ }
+
+ if (req->error.code == UV_OK &&
+ setsockopt(handle->accept_socket,
+ SOL_SOCKET,
+ SO_UPDATE_ACCEPT_CONTEXT,
+ (char*)&handle->socket,
+ sizeof(handle->socket)) == 0) {
+ /* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */
+ if (handle->connection_cb) {
+ ((uv_connection_cb)handle->connection_cb)(handle, 0);
}
} else {
- /* Errorneous accept is ignored if the listen socket is still healthy. */
+ /* Error related to accepted socket is ignored because the server */
+      /* socket may still be healthy. If the server socket is broken */
+ /* uv_queue_accept will detect it. */
closesocket(handle->accept_socket);
if (!(handle->flags & UV_HANDLE_CLOSING)) {
uv_queue_accept(handle);
@@ -1152,11 +1188,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
case UV_CONNECT:
if (req->cb) {
- success = GetOverlappedResult(handle->handle,
- &req->overlapped,
- &bytes,
- FALSE);
- if (success) {
+ if (req->error.code == UV_OK) {
if (setsockopt(handle->socket,
SOL_SOCKET,
SO_UPDATE_CONNECT_CONTEXT,
@@ -1169,7 +1201,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
((uv_connect_cb)req->cb)(req, -1);
}
} else {
- uv_set_sys_error(WSAGetLastError());
+ uv_last_error_ = req->error;
((uv_connect_cb)req->cb)(req, -1);
}
}
@@ -1186,12 +1218,12 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
/* more pending requests. */
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
- uv_want_endgame(handle);
+ uv_want_endgame((uv_handle_t*)handle);
}
}
-static int uv_timer_compare(uv_handle_t* a, uv_handle_t* b) {
+static int uv_timer_compare(uv_timer_t* a, uv_timer_t* b) {
if (a->due < b->due)
return -1;
if (a->due > b->due)
@@ -1204,10 +1236,10 @@ static int uv_timer_compare(uv_handle_t* a, uv_handle_t* b) {
}
-RB_GENERATE_STATIC(uv_timer_s, uv_handle_s, tree_entry, uv_timer_compare);
+RB_GENERATE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare);
-int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+int uv_timer_init(uv_timer_t* handle, uv_close_cb close_cb, void* data) {
handle->type = UV_TIMER;
handle->close_cb = (void*) close_cb;
handle->data = data;
@@ -1222,9 +1254,9 @@ int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
}
-int uv_timer_start(uv_handle_t* handle, uv_loop_cb timer_cb, int64_t timeout, int64_t repeat) {
+int uv_timer_start(uv_timer_t* handle, uv_loop_cb timer_cb, int64_t timeout, int64_t repeat) {
if (handle->flags & UV_HANDLE_ACTIVE) {
- RB_REMOVE(uv_timer_s, &uv_timers_, handle);
+ RB_REMOVE(uv_timer_tree_s, &uv_timers_, handle);
}
handle->timer_cb = (void*) timer_cb;
@@ -1232,7 +1264,7 @@ int uv_timer_start(uv_handle_t* handle, uv_loop_cb timer_cb, int64_t timeout, in
handle->repeat = repeat;
handle->flags |= UV_HANDLE_ACTIVE;
- if (RB_INSERT(uv_timer_s, &uv_timers_, handle) != NULL) {
+ if (RB_INSERT(uv_timer_tree_s, &uv_timers_, handle) != NULL) {
uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT");
}
@@ -1240,11 +1272,11 @@ int uv_timer_start(uv_handle_t* handle, uv_loop_cb timer_cb, int64_t timeout, in
}
-int uv_timer_stop(uv_handle_t* handle) {
+int uv_timer_stop(uv_timer_t* handle) {
if (!(handle->flags & UV_HANDLE_ACTIVE))
return 0;
- RB_REMOVE(uv_timer_s, &uv_timers_, handle);
+ RB_REMOVE(uv_timer_tree_s, &uv_timers_, handle);
handle->flags &= ~UV_HANDLE_ACTIVE;
@@ -1252,7 +1284,7 @@ int uv_timer_stop(uv_handle_t* handle) {
}
-int uv_timer_again(uv_handle_t* handle) {
+int uv_timer_again(uv_timer_t* handle) {
/* If timer_cb is NULL that means that the timer was never started. */
if (!handle->timer_cb) {
uv_set_sys_error(ERROR_INVALID_DATA);
@@ -1260,14 +1292,14 @@ int uv_timer_again(uv_handle_t* handle) {
}
if (handle->flags & UV_HANDLE_ACTIVE) {
- RB_REMOVE(uv_timer_s, &uv_timers_, handle);
+ RB_REMOVE(uv_timer_tree_s, &uv_timers_, handle);
handle->flags &= ~UV_HANDLE_ACTIVE;
}
if (handle->repeat) {
handle->due = uv_now_ + handle->repeat;
- if (RB_INSERT(uv_timer_s, &uv_timers_, handle) != NULL) {
+ if (RB_INSERT(uv_timer_tree_s, &uv_timers_, handle) != NULL) {
uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT");
}
@@ -1278,13 +1310,13 @@ int uv_timer_again(uv_handle_t* handle) {
}
-void uv_timer_set_repeat(uv_handle_t* handle, int64_t repeat) {
+void uv_timer_set_repeat(uv_timer_t* handle, int64_t repeat) {
assert(handle->type == UV_TIMER);
handle->repeat = repeat;
}
-int64_t uv_timer_get_repeat(uv_handle_t* handle) {
+int64_t uv_timer_get_repeat(uv_timer_t* handle) {
assert(handle->type == UV_TIMER);
return handle->repeat;
}
@@ -1383,57 +1415,57 @@ static void uv_loop_invoke(uv_handle_t* list) {
}
-int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+int uv_prepare_init(uv_prepare_t* handle, uv_close_cb close_cb, void* data) {
handle->type = UV_PREPARE;
- return uv_loop_init(handle, close_cb, data);
+ return uv_loop_init((uv_handle_t*)handle, close_cb, data);
}
-int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+int uv_check_init(uv_check_t* handle, uv_close_cb close_cb, void* data) {
handle->type = UV_CHECK;
- return uv_loop_init(handle, close_cb, data);
+ return uv_loop_init((uv_handle_t*)handle, close_cb, data);
}
-int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+int uv_idle_init(uv_idle_t* handle, uv_close_cb close_cb, void* data) {
handle->type = UV_IDLE;
- return uv_loop_init(handle, close_cb, data);
+ return uv_loop_init((uv_handle_t*)handle, close_cb, data);
}
-int uv_prepare_start(uv_handle_t* handle, uv_loop_cb loop_cb) {
+int uv_prepare_start(uv_prepare_t* handle, uv_loop_cb loop_cb) {
assert(handle->type == UV_PREPARE);
- return uv_loop_start(handle, loop_cb, &uv_prepare_handles_);
+ return uv_loop_start((uv_handle_t*)handle, loop_cb, &uv_prepare_handles_);
}
-int uv_check_start(uv_handle_t* handle, uv_loop_cb loop_cb) {
+int uv_check_start(uv_check_t* handle, uv_loop_cb loop_cb) {
assert(handle->type == UV_CHECK);
- return uv_loop_start(handle, loop_cb, &uv_check_handles_);
+ return uv_loop_start((uv_handle_t*)handle, loop_cb, &uv_check_handles_);
}
-int uv_idle_start(uv_handle_t* handle, uv_loop_cb loop_cb) {
+int uv_idle_start(uv_idle_t* handle, uv_loop_cb loop_cb) {
assert(handle->type == UV_IDLE);
- return uv_loop_start(handle, loop_cb, &uv_idle_handles_);
+ return uv_loop_start((uv_handle_t*)handle, loop_cb, &uv_idle_handles_);
}
-int uv_prepare_stop(uv_handle_t* handle) {
+int uv_prepare_stop(uv_prepare_t* handle) {
assert(handle->type == UV_PREPARE);
- return uv_loop_stop(handle, &uv_prepare_handles_);
+ return uv_loop_stop((uv_handle_t*)handle, &uv_prepare_handles_);
}
-int uv_check_stop(uv_handle_t* handle) {
+int uv_check_stop(uv_check_t* handle) {
assert(handle->type == UV_CHECK);
- return uv_loop_stop(handle, &uv_check_handles_);
+ return uv_loop_stop((uv_handle_t*)handle, &uv_check_handles_);
}
-int uv_idle_stop(uv_handle_t* handle) {
+int uv_idle_stop(uv_idle_t* handle) {
assert(handle->type == UV_IDLE);
- return uv_loop_stop(handle, &uv_idle_handles_);
+ return uv_loop_stop((uv_handle_t*)handle, &uv_idle_handles_);
}
@@ -1451,7 +1483,7 @@ int uv_is_active(uv_handle_t* handle) {
}
-int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
+int uv_async_init(uv_async_t* handle, uv_async_cb async_cb,
uv_close_cb close_cb, void* data) {
uv_req_t* req;
@@ -1463,7 +1495,7 @@ int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
handle->error = uv_ok_;
req = &handle->async_req;
- uv_req_init(req, handle, async_cb);
+ uv_req_init(req, (uv_handle_t*)handle, async_cb);
req->type = UV_WAKEUP;
uv_refs_++;
@@ -1472,7 +1504,7 @@ int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
}
-int uv_async_send(uv_handle_t* handle) {
+int uv_async_send(uv_async_t* handle) {
if (handle->type != UV_ASYNC) {
/* Can't set errno because that's not thread-safe. */
return -1;
@@ -1495,124 +1527,161 @@ int uv_async_send(uv_handle_t* handle) {
}
-static void uv_async_return_req(uv_handle_t* handle, uv_req_t* req) {
+static void uv_async_return_req(uv_async_t* handle, uv_req_t* req) {
assert(handle->type == UV_ASYNC);
assert(req->type == UV_WAKEUP);
handle->async_sent = 0;
if (req->cb) {
- ((uv_async_cb)req->cb)(handle, 0);
+ ((uv_async_cb)req->cb)((uv_handle_t*)handle, 0);
}
if (handle->flags & UV_HANDLE_CLOSING) {
- uv_want_endgame(handle);
+ uv_want_endgame((uv_handle_t*)handle);
}
}
-static void uv_poll() {
- BOOL success;
- DWORD bytes;
- ULONG_PTR key;
- OVERLAPPED* overlapped;
+static void uv_process_reqs() {
uv_req_t* req;
uv_handle_t* handle;
- DWORD timeout;
- int64_t delta;
- /* Call all pending close callbacks. */
- /* TODO: ugly, fixme. */
- uv_call_endgames();
- if (uv_refs_ == 0)
- return;
+ while (req = uv_remove_pending_req()) {
+ handle = req->handle;
- uv_loop_invoke(uv_prepare_handles_);
+ switch (handle->type) {
+ case UV_TCP:
+ uv_tcp_return_req((uv_tcp_t*)handle, req);
+ break;
+
+ case UV_ASYNC:
+ uv_async_return_req((uv_async_t*)handle, req);
+ break;
+
+ default:
+ assert(0);
+ }
+ }
+}
- uv_update_time();
+
+static void uv_process_timers() {
+ uv_timer_t* timer;
+
+ /* Call timer callbacks */
+ for (timer = RB_MIN(uv_timer_tree_s, &uv_timers_);
+ timer != NULL && timer->due <= uv_now_;
+ timer = RB_MIN(uv_timer_tree_s, &uv_timers_)) {
+ RB_REMOVE(uv_timer_tree_s, &uv_timers_, timer);
+
+ if (timer->repeat != 0) {
+ /* If it is a repeating timer, reschedule with repeat timeout. */
+ timer->due += timer->repeat;
+ if (timer->due < uv_now_) {
+ timer->due = uv_now_;
+ }
+ if (RB_INSERT(uv_timer_tree_s, &uv_timers_, timer) != NULL) {
+ uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT");
+ }
+ } else {
+ /* If non-repeating, mark the timer as inactive. */
+ timer->flags &= ~UV_HANDLE_ACTIVE;
+ }
+
+ ((uv_loop_cb) timer->timer_cb)((uv_handle_t*)timer, 0);
+ }
+}
+
+
+static DWORD uv_get_poll_timeout() {
+ uv_timer_t* timer;
+ int64_t delta;
/* Check if there are any running timers */
- handle = RB_MIN(uv_timer_s, &uv_timers_);
- if (handle) {
- delta = handle->due - uv_now_;
+ timer = RB_MIN(uv_timer_tree_s, &uv_timers_);
+ if (timer) {
+ uv_update_time();
+
+ delta = timer->due - uv_now_;
if (delta >= UINT_MAX) {
/* Can't have a timeout greater than UINT_MAX, and a timeout value of */
/* UINT_MAX means infinite, so that's no good either. */
- timeout = UINT_MAX - 1;
+ return UINT_MAX - 1;
} else if (delta < 0) {
/* Negative timeout values are not allowed */
- timeout = 0;
+ return 0;
} else {
- timeout = (DWORD)delta;
+ return (DWORD)delta;
}
} else {
/* No timers */
- timeout = INFINITE;
+ return INFINITE;
}
+}
+
+
+static void uv_poll() {
+ BOOL success;
+ DWORD bytes;
+ ULONG_PTR key;
+ OVERLAPPED* overlapped;
+ uv_req_t* req;
success = GetQueuedCompletionStatus(uv_iocp_,
&bytes,
&key,
&overlapped,
- timeout);
+ uv_get_poll_timeout());
uv_update_time();
- /* Call check callbacks */
- uv_loop_invoke(uv_check_handles_);
-
- /* Call timer callbacks */
- for (handle = RB_MIN(uv_timer_s, &uv_timers_);
- handle != NULL && handle->due <= uv_now_;
- handle = RB_MIN(uv_timer_s, &uv_timers_)) {
- RB_REMOVE(uv_timer_s, &uv_timers_, handle);
+ if (overlapped) {
+ /* Package was dequeued */
+ req = uv_overlapped_to_req(overlapped);
- if (handle->repeat != 0) {
- /* If it is a repeating timer, reschedule with repeat timeout. */
- handle->due += handle->repeat;
- if (handle->due < uv_now_) {
- handle->due = uv_now_;
- }
- if (RB_INSERT(uv_timer_s, &uv_timers_, handle) != NULL) {
- uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT");
- }
+ if (success) {
+ req->error = uv_ok_;
} else {
- /* If non-repeating, mark the timer as inactive. */
- handle->flags &= ~UV_HANDLE_ACTIVE;
+ req->error = uv_new_sys_error(GetLastError());
}
- ((uv_loop_cb) handle->timer_cb)(handle, 0);
- }
+ uv_insert_pending_req(req);
- /* Only if a iocp package was dequeued... */
- if (overlapped) {
- req = uv_overlapped_to_req(overlapped);
- handle = req->handle;
+ } else if (GetLastError() != WAIT_TIMEOUT) {
+ /* Serious error */
+ uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
+ }
+}
- switch (handle->type) {
- case UV_TCP:
- uv_tcp_return_req(handle, req);
- break;
- case UV_ASYNC:
- uv_async_return_req(handle, req);
- break;
+int uv_run() {
+ while (1) {
+ uv_update_time();
+ uv_process_timers();
+
+ /* Terrible: please fix me! */
+ while (uv_refs_ > 0 &&
+ (uv_idle_handles_ || uv_pending_reqs_tail_ || uv_endgame_handles_)) {
+ /* Terrible: please fix me! */
+ while (uv_pending_reqs_tail_ || uv_endgame_handles_) {
+ uv_process_endgames();
+ uv_process_reqs();
+ }
- default:
- assert(0);
+ /* Call idle callbacks */
+ uv_loop_invoke(uv_idle_handles_);
}
- } /* if (overlapped) */
- /* Call idle callbacks */
- while (uv_idle_handles_) {
- uv_call_endgames();
- uv_loop_invoke(uv_idle_handles_);
- }
-}
+ if (uv_refs_ <= 0) {
+ break;
+ }
+ uv_loop_invoke(uv_prepare_handles_);
-int uv_run() {
- while (uv_refs_ > 0) {
uv_poll();
+
+ uv_loop_invoke(uv_check_handles_);
}
+
assert(uv_refs_ == 0);
return 0;
}
@@ -1626,3 +1695,53 @@ void uv_ref() {
void uv_unref() {
uv_refs_--;
}
+
+
+int uv_utf16_to_utf8(wchar_t* utf16Buffer, size_t utf16Size, char* utf8Buffer, size_t utf8Size) {
+ return WideCharToMultiByte(CP_UTF8, 0, utf16Buffer, utf16Size, utf8Buffer, utf8Size, NULL, NULL);
+}
+
+
+int uv_get_exepath(char* buffer, size_t* size) {
+ int retVal;
+ size_t utf16Size;
+ wchar_t* utf16Buffer;
+
+ if (!buffer || !size) {
+ return -1;
+ }
+
+ utf16Buffer = (wchar_t*)malloc(sizeof(wchar_t) * *size);
+ if (!utf16Buffer) {
+ retVal = -1;
+ goto done;
+ }
+
+ /* Get the path as UTF-16 */
+ utf16Size = GetModuleFileNameW(NULL, utf16Buffer, *size - 1);
+ if (utf16Size <= 0) {
+ uv_set_sys_error(GetLastError());
+ retVal = -1;
+ goto done;
+ }
+
+ utf16Buffer[utf16Size] = L'\0';
+
+ /* Convert to UTF-8 */
+ *size = uv_utf16_to_utf8(utf16Buffer, utf16Size, buffer, *size);
+ if (!*size) {
+ uv_set_sys_error(GetLastError());
+ retVal = -1;
+ goto done;
+ }
+
+ buffer[*size] = '\0';
+ retVal = 0;
+
+done:
+ if (utf16Buffer) {
+ free(utf16Buffer);
+ }
+
+ return retVal;
+}
diff --git a/deps/uv/uv-win.h b/deps/uv/uv-win.h
index 83b73630b0..e87c5828a7 100644
--- a/deps/uv/uv-win.h
+++ b/deps/uv/uv-win.h
@@ -41,7 +41,7 @@ typedef struct uv_buf_t {
char* base;
} uv_buf_t;
-#define uv_req_private_fields \
+#define UV_REQ_PRIVATE_FIELDS \
union { \
/* Used by I/O operations */ \
struct { \
@@ -49,21 +49,24 @@ typedef struct uv_buf_t {
size_t queued_bytes; \
}; \
}; \
- int flags;
+ int flags; \
+ uv_err_t error; \
+ struct uv_req_s* next_req;
#define uv_tcp_connection_fields \
+ uv_alloc_cb alloc_cb; \
void* read_cb; \
struct uv_req_s read_req; \
unsigned int write_reqs_pending; \
uv_req_t* shutdown_req;
#define uv_tcp_server_fields \
- void *accept_cb; \
+ void *connection_cb; \
SOCKET accept_socket; \
struct uv_req_s accept_req; \
char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
-#define uv_tcp_fields \
+#define UV_TCP_PRIVATE_FIELDS \
unsigned int reqs_pending; \
union { \
SOCKET socket; \
@@ -74,29 +77,36 @@ typedef struct uv_buf_t {
struct { uv_tcp_server_fields }; \
};
-#define uv_timer_fields \
- RB_ENTRY(uv_handle_s) tree_entry; \
+#define UV_TIMER_PRIVATE_FIELDS \
+ RB_ENTRY(uv_timer_s) tree_entry; \
int64_t due; \
int64_t repeat; \
void* timer_cb;
-#define uv_loop_fields \
+#define UV_LOOP_PRIVATE_FIELDS \
uv_handle_t* loop_prev; \
uv_handle_t* loop_next; \
void* loop_cb;
-#define uv_async_fields \
+#define UV_ASYNC_PRIVATE_FIELDS \
struct uv_req_s async_req; \
/* char to avoid alignment issues */ \
char volatile async_sent;
-#define uv_handle_private_fields \
+#define UV_PREPARE_PRIVATE_FIELDS /* empty */
+#define UV_CHECK_PRIVATE_FIELDS /* empty */
+#define UV_IDLE_PRIVATE_FIELDS /* empty */
+
+/*
+ * TODO: remove UV_LOOP_PRIVATE_FIELDS from UV_HANDLE_PRIVATE_FIELDS and
+ * use it in UV_(PREPARE|CHECK|IDLE)_PRIVATE_FIELDS instead.
+ */
+
+#define UV_HANDLE_PRIVATE_FIELDS \
uv_handle_t* endgame_next; \
unsigned int flags; \
uv_err_t error; \
- union { \
- struct { uv_tcp_fields }; \
- struct { uv_timer_fields }; \
- struct { uv_loop_fields }; \
- struct { uv_async_fields }; \
- };
+ UV_LOOP_PRIVATE_FIELDS
+
+
+int uv_utf16_to_utf8(wchar_t* utf16Buffer, size_t utf16Size, char* utf8Buffer, size_t utf8Size);
diff --git a/deps/uv/uv.h b/deps/uv/uv.h
index 0b4984bf54..b28dec385e 100644
--- a/deps/uv/uv.h
+++ b/deps/uv/uv.h
@@ -33,6 +33,11 @@ extern "C" {
typedef struct uv_err_s uv_err_t;
typedef struct uv_handle_s uv_handle_t;
+typedef struct uv_tcp_s uv_tcp_t;
+typedef struct uv_timer_s uv_timer_t;
+typedef struct uv_prepare_s uv_prepare_t;
+typedef struct uv_check_s uv_check_t;
+typedef struct uv_idle_s uv_idle_t;
typedef struct uv_req_s uv_req_t;
@@ -51,12 +56,12 @@ typedef struct uv_req_s uv_req_t;
* In the case of uv_read_cb the uv_buf_t returned should be freed by the
* user.
*/
-typedef uv_buf_t (*uv_alloc_cb)(uv_handle_t* handle, size_t suggested_size);
-typedef void (*uv_read_cb)(uv_handle_t *handle, int nread, uv_buf_t buf);
+typedef uv_buf_t (*uv_alloc_cb)(uv_tcp_t* tcp, size_t suggested_size);
+typedef void (*uv_read_cb)(uv_tcp_t* tcp, int nread, uv_buf_t buf);
typedef void (*uv_write_cb)(uv_req_t* req, int status);
typedef void (*uv_connect_cb)(uv_req_t* req, int status);
typedef void (*uv_shutdown_cb)(uv_req_t* req, int status);
-typedef void (*uv_accept_cb)(uv_handle_t* handle);
+typedef void (*uv_connection_cb)(uv_tcp_t* server, int status);
typedef void (*uv_close_cb)(uv_handle_t* handle, int status);
/* TODO: do loop_cb and async_cb really need a status argument? */
typedef void (*uv_loop_cb)(uv_handle_t* handle, int status);
@@ -142,70 +147,67 @@ struct uv_req_s {
void* cb;
void* data;
/* private */
- uv_req_private_fields
+ UV_REQ_PRIVATE_FIELDS
};
+/*
+ * Initialize a request for use with uv_write, uv_shutdown, or uv_connect.
+ */
+void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb);
+
+#define UV_HANDLE_FIELDS \
+ /* read-only */ \
+ uv_handle_type type; \
+ /* public */ \
+ uv_close_cb close_cb; \
+ void* data; \
+ /* private */ \
+ UV_HANDLE_PRIVATE_FIELDS \
+
+/* The abstract base class of all handles. */
struct uv_handle_s {
- /* read-only */
- uv_handle_type type;
- /* public */
- uv_close_cb close_cb;
- void* data;
- /* number of bytes queued for writing */
- size_t write_queue_size;
- /* private */
- uv_handle_private_fields
+ UV_HANDLE_FIELDS
};
-
-/* Most functions return boolean: 0 for success and -1 for failure.
- * On error the user should then call uv_last_error() to determine
- * the error code.
+/*
+ * Returns 1 if the prepare/check/idle handle has been started, 0 otherwise.
+ * For other handle types this always returns 1.
*/
-uv_err_t uv_last_error();
-char* uv_strerror(uv_err_t err);
-const char* uv_err_name(uv_err_t err);
-
-void uv_init(uv_alloc_cb alloc);
-int uv_run();
+int uv_is_active(uv_handle_t* handle);
-/* Manually modify the event loop's reference count. Useful if the user wants
- * to have a handle or timeout that doesn't keep the loop alive.
+/*
+ * Request handle to be closed. close_cb will be called asynchronously after
+ * this call. This MUST be called on each handle before memory is released.
*/
-void uv_ref();
-void uv_unref();
-
-void uv_update_time();
-int64_t uv_now();
+int uv_close(uv_handle_t* handle);
-void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb);
/*
- * TODO:
- * - uv_(pipe|pipe_tty)_handle_init
- * - uv_bind_pipe(char* name)
- * - uv_continuous_read(uv_handle_t* handle, uv_continuous_read_cb* cb)
- * - A way to list cancelled uv_reqs after before/on uv_close_cb
+ * A subclass of uv_handle_t representing a TCP stream or TCP server. In the
+ * future this will probably be split into two classes - one a stream and
+ * the other a server.
*/
+struct uv_tcp_s {
+ UV_HANDLE_FIELDS
+ size_t write_queue_size; /* number of bytes queued for writing */
+ UV_TCP_PRIVATE_FIELDS
+};
-/* TCP socket methods.
- * Handle and callback bust be set by calling uv_req_init.
- */
-int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
-int uv_bind(uv_handle_t* handle, struct sockaddr* addr);
+int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data);
+
+int uv_bind(uv_tcp_t* handle, struct sockaddr_in);
+
+int uv_connect(uv_req_t* req, struct sockaddr_in);
-int uv_connect(uv_req_t* req, struct sockaddr* addr);
int uv_shutdown(uv_req_t* req);
-/* TCP server methods. */
-int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb);
+int uv_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
-/* Call this after accept_cb. client does not need to be initialized. */
-int uv_accept(uv_handle_t* server, uv_handle_t* client,
+/* Call this after connection_cb. client does not need to be initialized. */
+int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
uv_close_cb close_cb, void* data);
-
/* Read data from an incoming stream. The callback will be made several
* several times until there is no more data to read or uv_read_stop is
* called. When we've reached EOF nread will be set to -1 and the error is
@@ -215,82 +217,157 @@ int uv_accept(uv_handle_t* server, uv_handle_t* client,
* eof; it happens when libuv requested a buffer through the alloc callback
* but then decided that it didn't need that buffer.
*/
-int uv_read_start(uv_handle_t* handle, uv_read_cb cb);
-int uv_read_stop(uv_handle_t* handle);
+int uv_read_start(uv_tcp_t*, uv_alloc_cb alloc_cb, uv_read_cb read_cb);
+
+int uv_read_stop(uv_tcp_t*);
int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt);
-/* Timer methods */
-int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
-int uv_timer_start(uv_handle_t* handle, uv_loop_cb cb, int64_t timeout, int64_t repeat);
-int uv_timer_stop(uv_handle_t* handle);
+
/*
- * Stop the timer, and if it is repeating restart it using the repeat value
+ * Subclass of uv_handle_t. libev wrapper. Every active prepare handle gets
+ * its callback called exactly once per loop iteration, just before the
+ * system blocks to wait for completed i/o.
+ */
+struct uv_prepare_s {
+ UV_HANDLE_FIELDS
+ UV_PREPARE_PRIVATE_FIELDS
+};
+
+int uv_prepare_init(uv_prepare_t* prepare, uv_close_cb close_cb, void* data);
+
+int uv_prepare_start(uv_prepare_t* prepare, uv_loop_cb cb);
+
+int uv_prepare_stop(uv_prepare_t* prepare);
+
+
+/*
+ * Subclass of uv_handle_t. libev wrapper. Every active check handle gets
+ * its callback called exactly once per loop iteration, just after the
+ * system returns from blocking.
+ */
+struct uv_check_s {
+ UV_HANDLE_FIELDS
+ UV_CHECK_PRIVATE_FIELDS
+};
+
+int uv_check_init(uv_check_t* check, uv_close_cb close_cb, void* data);
+
+int uv_check_start(uv_check_t* check, uv_loop_cb cb);
+
+int uv_check_stop(uv_check_t* check);
+
+
+/*
+ * Subclass of uv_handle_t. libev wrapper. Every active idle handle gets its
+ * callback called repeatedly until it is stopped. This happens after all
+ * other types of callbacks are processed. When there are multiple "idle"
+ * handles active, their callbacks are called in turn.
+ */
+struct uv_idle_s {
+ UV_HANDLE_FIELDS
+ UV_IDLE_PRIVATE_FIELDS
+};
+
+int uv_idle_init(uv_idle_t* idle, uv_close_cb close_cb, void* data);
+
+int uv_idle_start(uv_idle_t* idle, uv_loop_cb cb);
+
+int uv_idle_stop(uv_idle_t* idle);
+
+
+/*
+ * Subclass of uv_handle_t. libev wrapper. uv_async_send wakes up the event
+ * loop and calls the async handle's callback There is no guarantee that
+ * every uv_async_send call leads to exactly one invocation of the callback;
+ * The only guarantee is that the callback function is called at least once
+ * after the call to async_send. Unlike all other libuv functions,
+ * uv_async_send can be called from another thread.
+ */
+typedef struct {
+ UV_HANDLE_FIELDS
+ UV_ASYNC_PRIVATE_FIELDS
+} uv_async_t;
+
+int uv_async_init(uv_async_t* async, uv_async_cb async_cb,
+ uv_close_cb close_cb, void* data);
+
+int uv_async_send(uv_async_t* async);
+
+
+/*
+ * Subclass of uv_handle_t. Wraps libev's ev_timer watcher. Used to get
+ * woken up at a specified time in the future.
+ */
+struct uv_timer_s {
+ UV_HANDLE_FIELDS
+ UV_TIMER_PRIVATE_FIELDS
+};
+
+int uv_timer_init(uv_timer_t* timer, uv_close_cb close_cb, void* data);
+
+int uv_timer_start(uv_timer_t* timer, uv_loop_cb cb, int64_t timeout, int64_t repeat);
+
+int uv_timer_stop(uv_timer_t* timer);
+
+/*
+ * Stop the timer, and if it is repeating restart it using the repeat value
* as the timeout. If the timer has never been started before it returns -1 and
* sets the error to UV_EINVAL.
*/
-int uv_timer_again(uv_handle_t* handle);
+int uv_timer_again(uv_timer_t* timer);
+
/*
* Set the repeat value. Note that if the repeat value is set from a timer
* callback it does not immediately take effect. If the timer was nonrepeating
* before, it will have been stopped. If it was repeating, then the old repeat
* value will have been used to schedule the next timeout.
*/
-void uv_timer_set_repeat(uv_handle_t* handle, int64_t repeat);
-int64_t uv_timer_get_repeat(uv_handle_t* handle);
+void uv_timer_set_repeat(uv_timer_t* timer, int64_t repeat);
-/* libev wrapper. Every active prepare handle gets its callback called
- * exactly once per loop iteration, just before the system blocks to wait
- * for completed i/o.
- */
-int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
-int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb);
-int uv_prepare_stop(uv_handle_t* handle);
+int64_t uv_timer_get_repeat(uv_timer_t* timer);
-/* libev wrapper. Every active check handle gets its callback called exactly
- * once per loop iteration, just after the system returns from blocking.
- */
-int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
-int uv_check_start(uv_handle_t* handle, uv_loop_cb cb);
-int uv_check_stop(uv_handle_t* handle);
-
-/* libev wrapper. Every active idle handle gets its callback called repeatedly until it is
- * stopped. This happens after all other types of callbacks are processed.
- * When there are multiple "idle" handles active, their callbacks are called
- * in turn.
- */
-int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
-int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb);
-int uv_idle_stop(uv_handle_t* handle);
-/* Returns 1 if the prepare/check/idle handle has been started, 0 otherwise.
- * For other handle types this always returns 1.
+/*
+ * Most functions return boolean: 0 for success and -1 for failure.
+ * On error the user should then call uv_last_error() to determine
+ * the error code.
*/
-int uv_is_active(uv_handle_t* handle);
+uv_err_t uv_last_error();
+char* uv_strerror(uv_err_t err);
+const char* uv_err_name(uv_err_t err);
-/* libev wrapper. uv_async_send wakes up the event loop and calls the async
- * handle's callback There is no guarantee that every uv_async_send call
- * leads to exactly one invocation of the callback; The only guarantee is
- * that the callback function is called at least once after the call to
- * async_send. Unlike everything else, uv_async_send can be called from
- * another thread.
- *
- * QUESTION(ryan) Can UV_ASYNC just use uv_loop_cb? Same signature on my
- * side.
- */
-int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
- uv_close_cb close_cb, void* data);
-int uv_async_send(uv_handle_t* handle);
+void uv_init();
+int uv_run();
-/* Request handle to be closed. close_cb will be called
- * asynchronously after this call.
+/*
+ * Manually modify the event loop's reference count. Useful if the user wants
+ * to have a handle or timeout that doesn't keep the loop alive.
*/
-int uv_close(uv_handle_t* handle);
+void uv_ref();
+void uv_unref();
+
+void uv_update_time();
+int64_t uv_now();
/* Utility */
struct sockaddr_in uv_ip4_addr(char* ip, int port);
+/* Gets the executable path */
+int uv_get_exepath(char* buffer, size_t* size);
+
+
+/* the presence of this union forces similar struct layout */
+union uv_any_handle {
+ uv_tcp_t tcp;
+ uv_prepare_t prepare;
+ uv_check_t check;
+ uv_idle_t idle;
+ uv_async_t async;
+ uv_timer_t timer;
+};
+
#ifdef __cplusplus
}
#endif