Diffstat (limited to 'deps/uv/src/unix/linux-core.c')
-rw-r--r--  deps/uv/src/unix/linux-core.c  |  69
1 file changed, 49 insertions(+), 20 deletions(-)
diff --git a/deps/uv/src/unix/linux-core.c b/deps/uv/src/unix/linux-core.c
index fb8ac3f237..b48a111170 100644
--- a/deps/uv/src/unix/linux-core.c
+++ b/deps/uv/src/unix/linux-core.c
@@ -18,6 +18,11 @@
* IN THE SOFTWARE.
*/
+/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
+ * EPOLL* counterparts. We use the POLL* variants in this file because that
+ * is what libuv uses elsewhere and it avoids a dependency on <sys/epoll.h>.
+ */
+
#include "uv.h"
#include "internal.h"
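The new header comment relies on POLLIN, POLLOUT, POLLERR and POLLHUP having the same values as their EPOLL* counterparts on Linux, which is what lets the rest of the file drop <sys/epoll.h>. A minimal standalone check of that assumption (a sketch, not part of the patch) could look like this:

    /* Prints each POLL constant next to its EPOLL counterpart so the
     * equivalence the header comment relies on can be verified by eye.
     */
    #include <poll.h>
    #include <stdio.h>
    #include <sys/epoll.h>

    int main(void) {
      printf("POLLIN  %#x  EPOLLIN  %#x\n", (unsigned) POLLIN,  (unsigned) EPOLLIN);
      printf("POLLOUT %#x  EPOLLOUT %#x\n", (unsigned) POLLOUT, (unsigned) EPOLLOUT);
      printf("POLLERR %#x  EPOLLERR %#x\n", (unsigned) POLLERR, (unsigned) EPOLLERR);
      printf("POLLHUP %#x  EPOLLHUP %#x\n", (unsigned) POLLHUP, (unsigned) EPOLLHUP);
      return 0;
    }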
@@ -69,7 +74,9 @@
#endif
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
-static int read_times(FILE* statfile_fp, unsigned int numcpus, uv_cpu_info_t* ci);
+static int read_times(FILE* statfile_fp,
+ unsigned int numcpus,
+ uv_cpu_info_t* ci);
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static unsigned long read_cpufreq(unsigned int cpunum);
@@ -102,7 +109,7 @@ int uv__platform_loop_init(uv_loop_t* loop) {
void uv__platform_loop_delete(uv_loop_t* loop) {
if (loop->inotify_fd == -1) return;
- uv__io_stop(loop, &loop->inotify_read_watcher, UV__POLLIN);
+ uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
uv__close(loop->inotify_fd);
loop->inotify_fd = -1;
}
@@ -144,7 +151,7 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
struct uv__epoll_event e;
int rc;
- e.events = UV__EPOLLIN;
+ e.events = POLLIN;
e.data = -1;
rc = 0;
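uv__io_check_fd() probes whether a descriptor can be registered with epoll at all, so it only needs a placeholder POLLIN event and a dummy data value. A rough standalone sketch of the same probe (a hypothetical fd_supports_epoll() helper using <sys/epoll.h> directly rather than libuv's uv__epoll wrappers):

    #include <errno.h>
    #include <poll.h>
    #include <sys/epoll.h>
    #include <unistd.h>

    /* Returns 0 if `fd` can be added to an epoll set, otherwise a negative
     * errno (for example -EPERM for regular files, which epoll rejects).
     */
    static int fd_supports_epoll(int fd) {
      struct epoll_event e;
      int epfd;
      int rc;

      epfd = epoll_create1(0);
      if (epfd == -1)
        return -errno;

      e.events = POLLIN;  /* same bit pattern as EPOLLIN on Linux */
      e.data.fd = -1;

      rc = 0;
      if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &e))
        rc = -errno;

      close(epfd);
      return rc;
    }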
@@ -181,6 +188,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
sigset_t sigset;
uint64_t sigmask;
uint64_t base;
+ int have_signals;
int nevents;
int count;
int nfds;
@@ -308,6 +316,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
goto update_timeout;
}
+ have_signals = 0;
nevents = 0;
assert(loop->watchers != NULL);
@@ -341,7 +350,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
* the current watcher. Also, filters out events that users have not
* requested us to watch.
*/
- pe->events &= w->pevents | UV__POLLERR | UV__POLLHUP;
+ pe->events &= w->pevents | POLLERR | POLLHUP;
/* Work around an epoll quirk where it sometimes reports just the
* EPOLLERR or EPOLLHUP event. In order to force the event loop to
@@ -358,17 +367,31 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
* needs to remember the error/hangup event. We should get that for
* free when we switch over to edge-triggered I/O.
*/
- if (pe->events == UV__EPOLLERR || pe->events == UV__EPOLLHUP)
- pe->events |= w->pevents & (UV__EPOLLIN | UV__EPOLLOUT);
+ if (pe->events == POLLERR || pe->events == POLLHUP)
+ pe->events |= w->pevents & (POLLIN | POLLOUT);
if (pe->events != 0) {
- w->cb(loop, w, pe->events);
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher)
+ have_signals = 1;
+ else
+ w->cb(loop, w, pe->events);
+
nevents++;
}
}
+
+ if (have_signals != 0)
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
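The have_signals flag added in this hunk defers the signal watcher: while scanning the epoll results, its events are only noted, every other watcher's callback runs immediately, and the signal callback then fires once after the scan. Because child-process watchers are built on signal watchers, they are deferred too, and the early return lets the event loop cycle instead of polling again within the same uv__io_poll() call. A generic sketch of that dispatch pattern (illustrative watcher struct and names, not libuv's types):

    #include <poll.h>
    #include <stddef.h>

    struct watcher {
      void (*cb)(struct watcher* w, int events);
      int pending;  /* events the poll backend reported for this watcher */
    };

    /* Run every ready watcher, but defer `special` (think: the signal
     * watcher) so its callback fires exactly once, after all the others.
     */
    static void dispatch(struct watcher** list, size_t n,
                         struct watcher* special) {
      int have_special;
      size_t i;

      have_special = 0;
      for (i = 0; i < n; i++) {
        struct watcher* w = list[i];

        if (w->pending == 0)
          continue;

        if (w == special)
          have_special = 1;  /* just remember it; don't run it yet */
        else
          w->cb(w, w->pending);
      }

      if (have_special)
        special->cb(special, POLLIN);
    }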
@@ -557,7 +580,7 @@ static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
char buf[1024];
if (!fgets(buf, sizeof(buf), statfile_fp))
- abort();
+ return -EIO;
num = 0;
while (fgets(buf, sizeof(buf), statfile_fp)) {
@@ -566,6 +589,9 @@ static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
num++;
}
+ if (num == 0)
+ return -EIO;
+
*numcpus = num;
return 0;
}
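With this change an unreadable or truncated /proc/stat is reported to the caller as -EIO instead of aborting the process. A standalone sketch of the same counting idea (a hypothetical count_cpu_lines() helper; it assumes /proc/stat starts with an aggregate "cpu" line followed by one "cpuN" line per core):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Count the per-core "cpuN" lines in an already-open /proc/stat stream.
     * Returns 0 and stores the count, or -EIO when the file is unreadable
     * or lists no per-core entries.
     */
    static int count_cpu_lines(FILE* fp, unsigned int* numcpus) {
      unsigned int num;
      char buf[1024];

      /* Skip the aggregate "cpu ..." line. */
      if (!fgets(buf, sizeof(buf), fp))
        return -EIO;

      num = 0;
      while (fgets(buf, sizeof(buf), fp)) {
        if (strncmp(buf, "cpu", 3))
          break;  /* first non-cpu line ends the per-core block */
        num++;
      }

      if (num == 0)
        return -EIO;

      *numcpus = num;
      return 0;
    }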
@@ -586,26 +612,20 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
err = uv__cpu_num(statfile_fp, &numcpus);
if (err < 0)
- return err;
-
- assert(numcpus != (unsigned int) -1);
- assert(numcpus != 0);
+ goto out;
+ err = -ENOMEM;
ci = uv__calloc(numcpus, sizeof(*ci));
if (ci == NULL)
- return -ENOMEM;
+ goto out;
err = read_models(numcpus, ci);
if (err == 0)
err = read_times(statfile_fp, numcpus, ci);
- if (fclose(statfile_fp))
- if (errno != EINTR && errno != EINPROGRESS)
- abort();
-
if (err) {
uv_free_cpu_info(ci, numcpus);
- return err;
+ goto out;
}
/* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
@@ -616,8 +636,15 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
*cpu_infos = ci;
*count = numcpus;
+ err = 0;
- return 0;
+out:
+
+ if (fclose(statfile_fp))
+ if (errno != EINTR && errno != EINPROGRESS)
+ abort();
+
+ return err;
}
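The reshuffling above funnels every exit from uv_cpu_info() through the new out: label, so the /proc/stat stream gets closed even on the early error returns (uv__cpu_num() or uv__calloc() failing) that previously skipped the fclose(). A compact sketch of that single-exit cleanup idiom (hypothetical load_resource() and parse() names, not libuv code):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for the real parsing work (think read_models()/read_times()). */
    static int parse(FILE* fp, char* buf, size_t len) {
      return fgets(buf, (int) len, fp) != NULL ? 0 : -EIO;
    }

    static int load_resource(const char* path, char** out) {
      FILE* fp;
      char* buf;
      int err;

      fp = fopen(path, "r");
      if (fp == NULL)
        return -errno;

      err = -ENOMEM;
      buf = malloc(1024);
      if (buf == NULL)
        goto out;

      err = parse(fp, buf, 1024);
      if (err) {
        free(buf);
        goto out;
      }

      *out = buf;
      err = 0;

    out:
      /* Single cleanup point: close the stream on every path, tolerating the
       * same errno values the patched uv_cpu_info() tolerates.
       */
      if (fclose(fp))
        if (errno != EINTR && errno != EINPROGRESS)
          abort();

      return err;
    }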
@@ -727,7 +754,9 @@ static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
}
-static int read_times(FILE* statfile_fp, unsigned int numcpus, uv_cpu_info_t* ci) {
+static int read_times(FILE* statfile_fp,
+ unsigned int numcpus,
+ uv_cpu_info_t* ci) {
unsigned long clock_ticks;
struct uv_cpu_times_s ts;
unsigned long user;