commit 282267846cbdf67e615e23de1a867235706eddf6
parent b928149f1dacf4f0e577896b1caf970120b690cf
Author: Evgeny Grin (Karlson2k) <k2k@drgrin.dev>
Date: Tue, 10 Mar 2026 01:56:08 +0100
Implemented kqueue support
Diffstat:
14 files changed, 1676 insertions(+), 225 deletions(-)
diff --git a/configure.ac b/configure.ac
@@ -1,6 +1,6 @@
# This file is part of libmicrohttpd.
# (C) 2006-2021 Christian Grothoff (and other contributing authors)
-# (C) 2014-2024 Evgeny Grin (Karlson2k)
+# (C) 2014-2026 Evgeny Grin (Karlson2k)
#
# libmicrohttpd is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
@@ -3740,6 +3740,74 @@ AS_IF([test "x$enable_epoll" = "xyes"],
]
)
+AC_ARG_ENABLE([[kqueue]],
+ [AS_HELP_STRING([[--enable-kqueue[=ARG]]], [enable kqueue support (yes, no, auto) [auto]])],
+ [
+ AS_CASE([$enableval],
+ [yes|no|auto],[:],
+ [AC_MSG_ERROR([invalid parameter value --enable-kqueue=${enableval}])]
+ )
+ ],[enable_kqueue='auto']
+)
+AS_VAR_IF([enable_kqueue],["no"],[:],
+ [
+ MHD_CHECK_FUNC_RUN([kqueue],
+ [[
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/time.h>
+#include <unistd.h>
+ ]],
+ [[
+ int kq = kqueue();
+ i][f (0 > kq) return -kq;
+ (void) close(kq);
+ return 0;
+ ]],
+ [cacheVar="assuming yes"],
+ [
+ AC_DEFINE([[HAVE_KQUEUE]],[[1]],[Define to '1' if kqueue is supported on your platform])
+ AC_DEFINE([[MHD_SUPPORT_KQUEUE]],[[1]],[Define to '1' to enable 'kqueue' functionality])
+ enable_kqueue='yes'
+ ],
+ [
+ AS_VAR_IF([enable_kqueue],["yes"],
+ [AC_MSG_ERROR([[Support for kqueue was explicitly requested but cannot be enabled on this platform.]])]
+ )
+ enable_kqueue='no'
+ ]
+ )
+ ]
+)
+AM_CONDITIONAL([MHD_SUPPORT_KQUEUE], [[test "x${enable_kqueue}" = "xyes"]])
+
+AS_VAR_IF([enable_kqueue],["yes"],
+ [
+ MHD_CHECK_FUNC([kqueuex],
+ [[
+#include <sys/event.h>
+ ]],
+ [[
+ i][f (0 > kqueuex(KQUEUE_CLOEXEC))
+ return 3;
+ ]]
+ )
+ MHD_CHECK_FUNC([kqueue1],
+ [[
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/time.h>
+#include <fcntl.h>
+ ]],
+ [[
+ i][f (0 > kqueue1(O_CLOEXEC))
+ return 3;
+ ]]
+ )
+ ]
+)
+
+
AC_CACHE_CHECK([for supported 'noreturn' keyword], [mhd_cv_decl_noreturn],
[
mhd_cv_decl_noreturn="none"
@@ -9395,6 +9463,7 @@ AC_MSG_NOTICE([[${PACKAGE_NAME} ${PACKAGE_VERSION} Configuration Summary:
select() support : ${enable_select}
poll() support : ${enable_poll=no}
epoll support : ${enable_epoll=no}
+ kqueue support : ${enable_kqueue=no}
sendfile() : ${found_sendfile}
HTTPS support : ${MSG_HTTPS}
TLS backends : ${MSG_TLS_BACKENDS}
diff --git a/src/include/microhttpd2.h b/src/include/microhttpd2.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2006-2025 Christian Grothoff, Karlson2k (Evgeny Grin)
+ Copyright (C) 2006-2026 Christian Grothoff, Karlson2k (Evgeny Grin)
(and other contributing authors)
GNU libmicrohttpd is free software; you can redistribute it and/or
@@ -1022,30 +1022,38 @@ enum MHD_FIXED_ENUM_MHD_SET_ MHD_StatusCode
MHD_SC_EPOLL_CTL_OUTSIDE_OF_SET_RANGE = 50062
,
/**
- * Failed to allocate memory for daemon's fd_sets
+ * Failed to allocate memory for daemon's events data, like fd_sets,
+ * poll, epoll or kqueue structures.
*/
- MHD_SC_FD_SET_MEMORY_ALLOCATE_FAILURE = 50063
+ MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE = 50063
,
/**
- * Failed to allocate memory for poll() structures
+ * Failed to add daemon's FDs (ITC and/or listening) to the internal events
+ * monitoring
*/
- MHD_SC_POLL_FDS_MEMORY_ALLOCATE_FAILURE = 50063
+ MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE = 50065
,
/**
- * Failed to allocate memory for epoll data
+ * Failed to register daemon's FDs (ITC or listening) in the application
+ * (external event) monitoring
*/
- MHD_SC_EPOLL_EVENTS_MEMORY_ALLOCATE_FAILURE = 50064
+ MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE = 50066
,
/**
- * Failed to add daemon's FDs (ITC and/or listening) to the epoll monitoring
+ * Failed to create kqueue FD
*/
- MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE = 50065
+ MHD_SC_KQUEUE_FD_CREATE_FAILED = 50067
,
/**
- * Failed to register daemon's FDs (ITC or listening) in the application
- * (external event) monitoring
+ * Failed to configure kqueue FD to be non-inheritable.
*/
- MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE = 50066
+ MHD_SC_KQUEUE_FD_SET_NOINHERIT_FAILED = 50068
+ ,
+ /**
+ * The kqueue FD cannot be used because the FD number is higher
+ * than the limit set by the application.
+ */
+ MHD_SC_KQUEUE_FD_OUTSIDE_OF_SET_RANGE = 50069
,
/**
* The select() syscall is not available on this platform or in this MHD
@@ -1066,6 +1074,12 @@ enum MHD_FIXED_ENUM_MHD_SET_ MHD_StatusCode
MHD_SC_EPOLL_SYSCALL_NOT_AVAILABLE = 50072
,
/**
+ * The kqueue syscalls are not available on this platform or in this MHD
+ * build.
+ */
+ MHD_SC_KQUEUE_SYSCALL_NOT_AVAILABLE = 50073
+ ,
+ /**
* Failed to obtain our listen port via introspection.
* FIXME: remove?
*/
@@ -1172,9 +1186,9 @@ enum MHD_FIXED_ENUM_MHD_SET_ MHD_StatusCode
MHD_SC_UNEXPECTED_SELECT_ERROR = 50116
,
/**
- * Failed to remove a socket to the epoll set.
+ * Failed to remove a connection socket from the epoll or kqueue monitoring.
*/
- MHD_SC_EPOLL_CTL_REMOVE_FAILED = 50117
+ MHD_SC_EVENTS_CONN_REMOVE_FAILED = 50117
,
/**
* poll() is not supported.
@@ -1212,6 +1226,11 @@ enum MHD_FIXED_ENUM_MHD_SET_ MHD_StatusCode
MHD_SC_EPOLL_HARD_ERROR = 50130
,
/**
+ * Encountered an unrecoverable error from kevent() function.
+ */
+ MHD_SC_KQUEUE_HARD_ERROR = 50131
+ ,
+ /**
* We failed to configure accepted socket
* to not use a SIGPIPE.
*/
@@ -3488,6 +3507,11 @@ enum MHD_FIXED_ENUM_APP_SET_ MHD_SockPollSyscall
* Use epoll.
*/
MHD_SPS_EPOLL = 3
+ ,
+ /**
+ * Use kqueue.
+ */
+ MHD_SPS_KQUEUE = 4
};
@@ -9048,6 +9072,10 @@ struct MHD_LibInfoFixedPollingFunc
* epoll technique for sockets polling
*/
enum MHD_Bool tech_epoll;
+ /**
+ * kqueue technique for sockets polling
+ */
+ enum MHD_Bool tech_kqueue;
};
/**
diff --git a/src/include/microhttpd2_main.h.in b/src/include/microhttpd2_main.h.in
@@ -4166,6 +4166,10 @@ struct MHD_LibInfoFixedPollingFunc
* epoll technique for sockets polling
*/
enum MHD_Bool tech_epoll;
+ /**
+ * kqueue technique for sockets polling
+ */
+ enum MHD_Bool tech_kqueue;
};
/**
diff --git a/src/include/microhttpd2_preamble.h.in b/src/include/microhttpd2_preamble.h.in
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2006-2025 Christian Grothoff, Karlson2k (Evgeny Grin)
+ Copyright (C) 2006-2026 Christian Grothoff, Karlson2k (Evgeny Grin)
(and other contributing authors)
GNU libmicrohttpd is free software; you can redistribute it and/or
@@ -1022,30 +1022,38 @@ enum MHD_FIXED_ENUM_MHD_SET_ MHD_StatusCode
MHD_SC_EPOLL_CTL_OUTSIDE_OF_SET_RANGE = 50062
,
/**
- * Failed to allocate memory for daemon's fd_sets
+ * Failed to allocate memory for daemon's events data, like fd_sets,
+ * poll, epoll or kqueue structures.
*/
- MHD_SC_FD_SET_MEMORY_ALLOCATE_FAILURE = 50063
+ MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE = 50063
,
/**
- * Failed to allocate memory for poll() structures
+ * Failed to add daemon's FDs (ITC and/or listening) to the internal events
+ * monitoring
*/
- MHD_SC_POLL_FDS_MEMORY_ALLOCATE_FAILURE = 50063
+ MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE = 50065
,
/**
- * Failed to allocate memory for epoll data
+ * Failed to register daemon's FDs (ITC or listening) in the application
+ * (external event) monitoring
*/
- MHD_SC_EPOLL_EVENTS_MEMORY_ALLOCATE_FAILURE = 50064
+ MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE = 50066
,
/**
- * Failed to add daemon's FDs (ITC and/or listening) to the epoll monitoring
+ * Failed to create kqueue FD
*/
- MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE = 50065
+ MHD_SC_KQUEUE_FD_CREATE_FAILED = 50067
,
/**
- * Failed to register daemon's FDs (ITC or listening) in the application
- * (external event) monitoring
+ * Failed to configure kqueue FD to be non-inheritable.
*/
- MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE = 50066
+ MHD_SC_KQUEUE_FD_SET_NOINHERIT_FAILED = 50068
+ ,
+ /**
+ * The kqueue FD cannot be used because the FD number is higher
+ * than the limit set by the application.
+ */
+ MHD_SC_KQUEUE_FD_OUTSIDE_OF_SET_RANGE = 50069
,
/**
* The select() syscall is not available on this platform or in this MHD
@@ -1066,6 +1074,12 @@ enum MHD_FIXED_ENUM_MHD_SET_ MHD_StatusCode
MHD_SC_EPOLL_SYSCALL_NOT_AVAILABLE = 50072
,
/**
+ * The kqueue syscalls are not available on this platform or in this MHD
+ * build.
+ */
+ MHD_SC_KQUEUE_SYSCALL_NOT_AVAILABLE = 50073
+ ,
+ /**
* Failed to obtain our listen port via introspection.
* FIXME: remove?
*/
@@ -1172,9 +1186,9 @@ enum MHD_FIXED_ENUM_MHD_SET_ MHD_StatusCode
MHD_SC_UNEXPECTED_SELECT_ERROR = 50116
,
/**
- * Failed to remove a socket to the epoll set.
+ * Failed to remove a connection socket from the epoll or kqueue monitoring.
*/
- MHD_SC_EPOLL_CTL_REMOVE_FAILED = 50117
+ MHD_SC_EVENTS_CONN_REMOVE_FAILED = 50117
,
/**
* poll() is not supported.
@@ -1212,6 +1226,11 @@ enum MHD_FIXED_ENUM_MHD_SET_ MHD_StatusCode
MHD_SC_EPOLL_HARD_ERROR = 50130
,
/**
+ * Encountered an unrecoverable error from kevent() function.
+ */
+ MHD_SC_KQUEUE_HARD_ERROR = 50131
+ ,
+ /**
* We failed to configure accepted socket
* to not use a SIGPIPE.
*/
@@ -3488,6 +3507,11 @@ enum MHD_FIXED_ENUM_APP_SET_ MHD_SockPollSyscall
* Use epoll.
*/
MHD_SPS_EPOLL = 3
+ ,
+ /**
+ * Use kqueue.
+ */
+ MHD_SPS_KQUEUE = 4
};
diff --git a/src/mhd2/daemon_add_conn.c b/src/mhd2/daemon_add_conn.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2014-2024 Evgeny Grin (Karlson2k)
+ Copyright (C) 2014-2026 Evgeny Grin (Karlson2k)
Copyright (C) 2007-2018 Daniel Pittman and Christian Grothoff
GNU libmicrohttpd is free software; you can redistribute it and/or
diff --git a/src/mhd2/daemon_start.c b/src/mhd2/daemon_start.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2024 Evgeny Grin (Karlson2k)
+ Copyright (C) 2024-2026 Evgeny Grin (Karlson2k)
GNU libmicrohttpd is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -71,6 +71,7 @@
#ifdef MHD_SUPPORT_EPOLL
# include <sys/epoll.h>
#endif
+#include "sys_kqueue.h"
#ifdef MHD_SOCKETS_KIND_POSIX
# include <fcntl.h>
@@ -207,7 +208,8 @@ daemon_set_work_mode (struct MHD_Daemon *restrict d,
break;
case MHD_WM_EXTERNAL_SINGLE_FD_WATCH:
if ((MHD_SPS_AUTO != s->poll_syscall) &&
- (MHD_SPS_EPOLL != s->poll_syscall))
+ (MHD_SPS_EPOLL != s->poll_syscall) &&
+ (MHD_SPS_KQUEUE != s->poll_syscall))
{
mhd_LOG_MSG ( \
d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \
@@ -215,11 +217,11 @@ daemon_set_work_mode (struct MHD_Daemon *restrict d,
"is not compatible with requested socket polling syscall.");
return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
}
-#ifndef MHD_SUPPORT_EPOLL
+#if ! defined(MHD_SUPPORT_EPOLL) && ! defined(MHD_SUPPORT_KQUEUE)
mhd_LOG_MSG ( \
d, MHD_SC_FEATURE_DISABLED, \
- "The epoll is required for the requested work mode " \
- "MHD_WM_EXTERNAL_SINGLE_FD_WATCH, but not available on this " \
+ "The epoll or kqueue is required for the requested work mode " \
+ "MHD_WM_EXTERNAL_SINGLE_FD_WATCH, but none is available on this " \
"platform or MHD build.");
return MHD_SC_FEATURE_DISABLED;
#else
@@ -235,6 +237,14 @@ daemon_set_work_mode (struct MHD_Daemon *restrict d,
"is not compatible with 'epoll' sockets polling.");
return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
}
+ if (MHD_SPS_KQUEUE == s->poll_syscall)
+ {
+ mhd_LOG_MSG ( \
+ d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \
+ "The requested work mode MHD_WM_THREAD_PER_CONNECTION " \
+ "is not compatible with 'kqueue' sockets polling.");
+ return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
+ }
mhd_FALLTHROUGH;
/* Intentional fallthrough */
case MHD_WM_WORKER_THREADS:
@@ -1229,22 +1239,65 @@ detect_listen_type_and_port (struct MHD_Daemon *restrict d)
/**
* Initialise daemon's epoll FD
+ * This could be performed early to probe for epoll FD presence
+ * or, normally, during worker initialisation
+ * @param d the daemon object
+ * @param early_probing 'true' if this is early epoll probing
+ * @param log_failures set to 'false' if errors logging should be suppressed
+ * when fallback options exist
+ * @return #MHD_SC_OK on success,
+ * the error code otherwise
*/
static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
-init_epoll (struct MHD_Daemon *restrict d,
- bool log_failures)
+init_epoll_fd (struct MHD_Daemon *restrict d,
+ bool early_probing,
+ bool log_failures)
{
int e_fd;
+ mhd_ASSUME (early_probing || log_failures);
+ mhd_assert ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) ||
+ (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type));
mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
- mhd_assert ((mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) || \
- ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) && \
- (mhd_WM_INT_IS_THREAD_POOL (d->wmode_int))));
- mhd_assert ((! d->dbg.net_inited) || \
- (mhd_WM_INT_IS_THREAD_POOL (d->wmode_int)));
+ mhd_assert (early_probing || d->dbg.net_inited);
+ mhd_assert (! early_probing || ! d->dbg.net_inited);
mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \
(NULL == d->events.data.epoll.events));
- mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \
- (MHD_INVALID_SOCKET == d->events.data.epoll.e_fd));
+
+ if (! early_probing)
+ {
+ /* Full events initialisation */
+ mhd_ASSUME (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
+ if (! mhd_D_HAS_MASTER (d))
+ {
+ mhd_assert (0 < d->events.data.epoll.early_fd);
+ /* Move early initialised epoll FD */
+ d->events.data.epoll.e_fd = d->events.data.epoll.early_fd;
+ d->events.data.epoll.early_fd = MHD_INVALID_SOCKET;
+ return MHD_SC_OK;
+ }
+#ifdef MHD_SUPPORT_THREADS
+ else
+ {
+ /* Worker daemon */
+ int early_fd;
+
+ early_fd = d->threading.hier.master->events.data.epoll.early_fd;
+
+ /* Move early initialised epoll FD if it is not yet taken */
+ if (MHD_INVALID_SOCKET != early_fd)
+ {
+ d->events.data.epoll.e_fd = early_fd;
+ d->threading.hier.master->events.data.epoll.early_fd =
+ MHD_INVALID_SOCKET;
+ return MHD_SC_OK;
+ }
+ /* Proceed with new epoll FD creation */
+ }
+#endif /* MHD_SUPPORT_THREADS */
+ }
+ else
+ mhd_ASSUME (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type);
+
#ifdef HAVE_EPOLL_CREATE1
e_fd = epoll_create1 (EPOLL_CLOEXEC);
#else /* ! HAVE_EPOLL_CREATE1 */
@@ -1273,29 +1326,28 @@ init_epoll (struct MHD_Daemon *restrict d,
return MHD_SC_EPOLL_CTL_OUTSIDE_OF_SET_RANGE; /* Failure exit point */
}
+ /* Needs to be set here before setting the epoll data member 'early_fd' */
d->events.poll_type = mhd_POLL_TYPE_EPOLL;
- d->events.data.epoll.e_fd = e_fd;
- d->events.data.epoll.events = NULL; /* Memory allocated during event and threads init */
- d->events.data.epoll.num_elements = 0;
+ if (! early_probing)
+ d->events.data.epoll.e_fd = e_fd;
+ else
+ d->events.data.epoll.early_fd = e_fd;
+
return MHD_SC_OK; /* Success exit point */
}
/**
* Deinitialise daemon's epoll FD
+ * @param d the daemon object
*/
MHD_FN_PAR_NONNULL_ (1) static void
-deinit_epoll (struct MHD_Daemon *restrict d)
+deinit_epoll_fd (struct MHD_Daemon *restrict d)
{
mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
- /* With thread pool the epoll control FD could be migrated to the
- * first worker daemon. */
- mhd_assert ((MHD_INVALID_SOCKET != d->events.data.epoll.e_fd) || \
- (mhd_WM_INT_IS_THREAD_POOL (d->wmode_int)));
- mhd_assert ((MHD_INVALID_SOCKET != d->events.data.epoll.e_fd) || \
- (mhd_D_HAS_WORKERS (d)));
- if (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd)
- close (d->events.data.epoll.e_fd);
+ mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd);
+ mhd_assert (MHD_INVALID_SOCKET == d->events.data.epoll.early_fd);
+ close (d->events.data.epoll.e_fd);
}
@@ -1322,16 +1374,6 @@ daemon_choose_and_preinit_events (struct MHD_Daemon *restrict d,
(mhd_WM_INT_EXTERNAL_EVENTS_LEVEL != d->wmode_int) || \
(MHD_SPS_AUTO == s->poll_syscall));
- mhd_assert ((mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) || \
- (mhd_WM_INT_EXTERNAL_EVENTS_EDGE == d->wmode_int) || \
- (mhd_WM_INT_EXTERNAL_EVENTS_LEVEL == d->wmode_int) || \
- (MHD_WM_EXTERNAL_SINGLE_FD_WATCH == s->work_mode.mode));
- mhd_assert ((mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) || \
- (d->events.poll_type == (enum mhd_IntPollType) s->poll_syscall) \
- || ((MHD_SPS_AUTO == s->poll_syscall) && \
- ((mhd_POLL_TYPE_EXT == d->events.poll_type) || \
- mhd_POLL_TYPE_INT_IS_EPOLL (d->events.poll_type))));
-
/* Check whether the provided parameter is in the range of expected values.
Reject unsupported or disabled values. */
switch (s->poll_syscall)
@@ -1363,6 +1405,7 @@ daemon_choose_and_preinit_events (struct MHD_Daemon *restrict d,
break;
case MHD_SPS_EPOLL:
mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
+ mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
#ifndef MHD_SUPPORT_EPOLL
mhd_LOG_MSG (d, MHD_SC_EPOLL_SYSCALL_NOT_AVAILABLE, \
"'epoll' is not supported by the platform or " \
@@ -1372,13 +1415,51 @@ daemon_choose_and_preinit_events (struct MHD_Daemon *restrict d,
chosen_type = mhd_POLL_TYPE_EPOLL;
#endif /* MHD_SUPPORT_EPOLL */
break;
+ case MHD_SPS_KQUEUE:
+ mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
+ mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
+#ifndef MHD_SUPPORT_KQUEUE
+ mhd_LOG_MSG (d, MHD_SC_KQUEUE_SYSCALL_NOT_AVAILABLE, \
+ "'kqueue' is not supported by the platform or " \
+ "this MHD build");
+ return MHD_SC_KQUEUE_SYSCALL_NOT_AVAILABLE;
+#else /* MHD_SUPPORT_KQUEUE */
+ chosen_type = mhd_POLL_TYPE_KQUEUE;
+#endif /* MHD_SUPPORT_KQUEUE */
+ break;
default:
mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_UNEXPECTED_SPS,
"Wrong socket polling syscall specified");
return MHD_SC_CONFIGURATION_UNEXPECTED_SPS;
}
+#ifdef MHD_SUPPORT_HTTPS
+ if ((mhd_WM_INT_EXTERNAL_EVENTS_EDGE == (d)->wmode_int) ||
+ mhd_POLL_TYPE_INT_IS_EDGE_TRIG (chosen_type))
+ { /* Edge-triggered polling chosen */
+ if (MHD_TLS_BACKEND_NONE != s->tls)
+ {
+ if (! mhd_tls_is_edge_trigg_supported (s))
+ {
+#ifdef MHD_SUPPORT_LOG_FUNCTIONALITY
+ if (MHD_TLS_BACKEND_ANY == s->tls)
+ mhd_LOG_MSG (d, MHD_SC_TLS_BACKEND_DAEMON_INCOMPATIBLE_SETTINGS, \
+ "Edge-triggered sockets polling cannot be used "
+ "with available TLS backends");
+ else
+ mhd_LOG_MSG (d, MHD_SC_TLS_BACKEND_DAEMON_INCOMPATIBLE_SETTINGS, \
+ "Edge-triggered sockets polling cannot be used "
+ "with selected TLS backend");
+#endif /* MHD_SUPPORT_LOG_FUNCTIONALITY */
+ return MHD_SC_TLS_BACKEND_DAEMON_INCOMPATIBLE_SETTINGS;
+ }
+ }
+ }
+#endif /* MHD_SUPPORT_HTTPS */
+
mhd_assert (mhd_POLL_TYPE_EXT != chosen_type);
+ mhd_ASSUME ((mhd_POLL_TYPE_NOT_SET_YET != chosen_type) || \
+ (MHD_SPS_AUTO == s->poll_syscall));
if (mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
{
@@ -1386,67 +1467,66 @@ daemon_choose_and_preinit_events (struct MHD_Daemon *restrict d,
chosen_type = mhd_POLL_TYPE_EXT;
}
-#ifdef MHD_SUPPORT_EPOLL
- /* Try 'epoll' if needed or possible */
+#if defined(MHD_SUPPORT_EPOLL) || defined(MHD_SUPPORT_KQUEUE)
+ /* Try edge-triggered polling */
if ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
- || (mhd_POLL_TYPE_EPOLL == chosen_type))
+ || mhd_POLL_TYPE_INT_IS_KQUEUE (chosen_type)
+ || mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type))
{
- bool epoll_required;
- bool epoll_allowed;
+ bool et_allowed;
- epoll_required = false;
- if (mhd_POLL_TYPE_EPOLL == chosen_type)
- {
- mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
- epoll_required = true;
- }
- else if (MHD_WM_EXTERNAL_SINGLE_FD_WATCH == s->work_mode.mode)
- {
- mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
- epoll_required = true;
- }
-
- epoll_allowed = true;
+ et_allowed = true;
if (mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int))
{
- mhd_assert (! epoll_required);
- epoll_allowed = false;
+ mhd_assert (! mhd_POLL_TYPE_INT_IS_KQUEUE (chosen_type));
+ mhd_assert (! mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type));
+ et_allowed = false;
}
# ifdef MHD_SUPPORT_HTTPS
else if (MHD_TLS_BACKEND_NONE != s->tls)
- {
- if (! epoll_required)
- epoll_allowed = mhd_tls_is_edge_trigg_supported (s);
- /* If 'epoll' is required, but TLS backend does not support it,
- then continue with 'epoll' here and fail at TLS initialisation. */
- /* TODO: fail here */
- }
+ et_allowed = mhd_tls_is_edge_trigg_supported (s);
# endif /* MHD_SUPPORT_HTTPS */
- mhd_assert (epoll_allowed || ! epoll_required);
-
- if (epoll_allowed)
+ if (et_allowed)
{
- enum MHD_StatusCode epoll_res;
-
- epoll_res = init_epoll (d,
- epoll_required);
- if (MHD_SC_OK == epoll_res)
- chosen_type = mhd_POLL_TYPE_EPOLL;
- else
+ mhd_ASSUME ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
+ || mhd_POLL_TYPE_INT_IS_KQUEUE (chosen_type)
+ || mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type));
+
+# if defined(MHD_SUPPORT_KQUEUE)
+ if ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
+ || mhd_POLL_TYPE_INT_IS_KQUEUE (chosen_type))
+ chosen_type = mhd_POLL_TYPE_KQUEUE; /* No need to perform additional checking here */
+# endif /* MHD_SUPPORT_KQUEUE */
+# ifdef MHD_SUPPORT_EPOLL
+ if ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
+ || mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type))
{
- if (epoll_required)
- return epoll_res;
- mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == chosen_type);
+ /* epoll - need to be probed here as it can be disabled in kernel */
+ enum MHD_StatusCode epoll_res;
+ epoll_res = init_epoll_fd (d,
+ true,
+ mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type));
+ if (MHD_SC_OK != epoll_res)
+ {
+ if (mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type))
+ return epoll_res;
+ mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == chosen_type);
+ }
+ else
+ chosen_type = mhd_POLL_TYPE_EPOLL;
}
+# endif /* MHD_SUPPORT_EPOLL */
}
else
- mhd_assert (mhd_POLL_TYPE_EPOLL != chosen_type);
+ mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == chosen_type);
}
+# ifdef MHD_SUPPORT_EPOLL
mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \
- (0 < d->events.data.epoll.e_fd));
+ (0 < d->events.data.epoll.early_fd));
mhd_assert ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) == \
(mhd_POLL_TYPE_EPOLL == chosen_type));
+# endif /* MHD_SUPPORT_EPOLL */
#endif /* ! MHD_SUPPORT_EPOLL */
if (mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
@@ -1511,12 +1591,25 @@ daemon_choose_and_preinit_events (struct MHD_Daemon *restrict d,
mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode);
mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode);
mhd_assert (MHD_NO == s->reregister_all);
- /* Pre-initialised by init_epoll() */
+ /* Pre-initialised by init_epoll_fd() */
mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
- mhd_assert (0 <= d->events.data.epoll.e_fd);
- mhd_assert (NULL == d->events.data.epoll.events);
+ mhd_assert (0 <= d->events.data.epoll.early_fd);
+ d->events.data.epoll.events = NULL; /* Memory allocated during event and threads init */
+ d->events.data.epoll.num_elements = 0;
break;
#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ case mhd_POLL_TYPE_KQUEUE:
+ mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
+ mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode);
+ mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode);
+ mhd_assert (MHD_NO == s->reregister_all);
+ d->events.poll_type = mhd_POLL_TYPE_KQUEUE;
+ d->events.data.kq.kq_fd = -1;
+ d->events.data.kq.kes = NULL;
+ d->events.data.kq.num_elements = 0u;
+ break;
+#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
@@ -1608,8 +1701,14 @@ daemon_init_net (struct MHD_Daemon *restrict d,
}
#ifdef MHD_SUPPORT_EPOLL
- if ((mhd_POLL_TYPE_EPOLL == d->events.poll_type))
- close (d->events.data.epoll.e_fd);
+ /* Special case for epoll: the epoll FD is probed early in events
+ pre-initialisation and has not been moved to the events epoll FD,
+ therefore it needs cleanup here */
+ if (mhd_POLL_TYPE_EPOLL == d->events.poll_type)
+ {
+ mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.early_fd);
+ close (d->events.data.epoll.early_fd);
+ }
#endif /* MHD_SUPPORT_EPOLL */
mhd_assert (MHD_SC_OK != ret);
@@ -1629,8 +1728,15 @@ daemon_deinit_net (struct MHD_Daemon *restrict d)
mhd_assert (! d->dbg.net_deinited);
mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type);
#ifdef MHD_SUPPORT_EPOLL
- if (mhd_POLL_TYPE_EPOLL == d->events.poll_type)
- deinit_epoll (d);
+ /* Special case for epoll: the epoll FD is probed early in events
+ pre-initialisation and may not have been moved yet to the events
+ epoll FD, therefore it needs cleanup here */
+ if ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) &&
+ (MHD_INVALID_SOCKET != d->events.data.epoll.early_fd))
+ {
+ mhd_assert (0 < d->events.data.epoll.early_fd);
+ close (d->events.data.epoll.early_fd);
+ }
#endif /* MHD_SUPPORT_EPOLL */
if (MHD_INVALID_SOCKET != d->net.listen.fd)
mhd_socket_close (d->net.listen.fd);
@@ -1897,6 +2003,172 @@ daemon_deinit_large_buf (struct MHD_Daemon *restrict d)
}
+#ifdef MHD_SUPPORT_KQUEUE
+
+/**
+ * Initialise daemon's kqueue FD
+ * @param d the daemon object
+ * @return #MHD_SC_OK on success,
+ * the error code otherwise
+ */
+static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
+init_kqueue_fd (struct MHD_Daemon *restrict d)
+{
+ int kq_fd;
+ mhd_assert (mhd_POLL_TYPE_KQUEUE == d->events.poll_type);
+ mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
+ mhd_assert (d->dbg.net_inited);
+ mhd_assert (MHD_INVALID_SOCKET == d->events.data.kq.kq_fd);
+ mhd_assert (NULL == d->events.data.kq.kes);
+ mhd_assert (0u == d->events.data.kq.num_elements);
+
+ kq_fd = mhd_kqueue ();
+ if (0 > kq_fd)
+ {
+ mhd_LOG_MSG (d, MHD_SC_KQUEUE_FD_CREATE_FAILED, \
+ "Failed to create kqueue FD");
+ return MHD_SC_KQUEUE_FD_CREATE_FAILED; /* Failure exit point */
+ }
+
+ if (! mhd_FD_FITS_DAEMON (d, kq_fd))
+ {
+ mhd_LOG_MSG (d, MHD_SC_KQUEUE_FD_OUTSIDE_OF_SET_RANGE, \
+ "The kqueue FD value is higher than allowed");
+ (void) close (kq_fd);
+ return MHD_SC_KQUEUE_FD_OUTSIDE_OF_SET_RANGE; /* Failure exit point */
+ }
+
+ if (! mhd_KQUEUE_HAS_CLOEXEC_SET ())
+ {
+ if (! mhd_socket_noninheritable (kq_fd))
+ mhd_LOG_MSG (d, MHD_SC_KQUEUE_FD_SET_NOINHERIT_FAILED, \
+ "Failed to make kqueue FD non-inheritable");
+ }
+ d->events.data.kq.kq_fd = kq_fd;
+
+ return MHD_SC_OK; /* Success exit point */
+}
+
+
+/**
+ * Deinitialise daemon's kqueue FD
+ * @param d the daemon object
+ */
+MHD_FN_PAR_NONNULL_ (1) static void
+deinit_kqueue_fd (struct MHD_Daemon *restrict d)
+{
+ mhd_assert (mhd_POLL_TYPE_KQUEUE == d->events.poll_type);
+ mhd_assert (0 < d->events.data.kq.kq_fd);
+ close (d->events.data.kq.kq_fd);
+}
+
+
+#endif /* MHD_SUPPORT_KQUEUE */
+
+
+/**
+ * Initialise daemon's events polling FD (if used by polling function)
+ * @param d the daemon object
+ * @return #MHD_SC_OK on success,
+ * the error code otherwise
+ */
+static MHD_FN_PAR_NONNULL_ (1)
+MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
+init_events_fd (struct MHD_Daemon *restrict d)
+{
+ enum MHD_StatusCode res;
+
+ res = MHD_SC_OK;
+ switch (d->events.poll_type)
+ {
+ case mhd_POLL_TYPE_EXT:
+ break;
+#ifdef MHD_SUPPORT_SELECT
+ case mhd_POLL_TYPE_SELECT:
+ break;
+#endif /* MHD_SUPPORT_SELECT */
+#ifdef MHD_SUPPORT_POLL
+ case mhd_POLL_TYPE_POLL:
+ break;
+#endif /* MHD_SUPPORT_POLL */
+#ifdef MHD_SUPPORT_EPOLL
+ case mhd_POLL_TYPE_EPOLL:
+ res = init_epoll_fd (d,
+ false,
+ true);
+ break;
+#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ case mhd_POLL_TYPE_KQUEUE:
+ res = init_kqueue_fd (d);
+ break;
+#endif /* MHD_SUPPORT_KQUEUE */
+#ifndef MHD_SUPPORT_SELECT
+ case mhd_POLL_TYPE_SELECT:
+#endif /* ! MHD_SUPPORT_SELECT */
+#ifndef MHD_SUPPORT_POLL
+ case mhd_POLL_TYPE_POLL:
+#endif /* ! MHD_SUPPORT_POLL */
+ case mhd_POLL_TYPE_NOT_SET_YET:
+ default:
+ mhd_UNREACHABLE ();
+ return MHD_SC_INTERNAL_ERROR;
+ }
+
+#ifndef NDEBUG
+ if (MHD_SC_OK == res)
+ d->dbg.events_fd_inited = true;
+#endif /* ! NDEBUG */
+
+ return res;
+}
+
+
+/**
+ * Deinitialise daemon's events polling FD (if used by polling function)
+ * @param d the daemon object
+ */
+static MHD_FN_PAR_NONNULL_ (1) void
+deinit_events_fd (struct MHD_Daemon *restrict d)
+{
+ mhd_assert (d->dbg.events_fd_inited);
+
+ switch (d->events.poll_type)
+ {
+ case mhd_POLL_TYPE_EXT:
+ return;
+#ifdef MHD_SUPPORT_SELECT
+ case mhd_POLL_TYPE_SELECT:
+ return;
+#endif /* MHD_SUPPORT_SELECT */
+#ifdef MHD_SUPPORT_POLL
+ case mhd_POLL_TYPE_POLL:
+ return;
+#endif /* MHD_SUPPORT_POLL */
+#ifdef MHD_SUPPORT_EPOLL
+ case mhd_POLL_TYPE_EPOLL:
+ deinit_epoll_fd (d);
+ return;
+#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ case mhd_POLL_TYPE_KQUEUE:
+ deinit_kqueue_fd (d);
+ return;
+#endif /* MHD_SUPPORT_KQUEUE */
+#ifndef MHD_SUPPORT_SELECT
+ case mhd_POLL_TYPE_SELECT:
+#endif /* ! MHD_SUPPORT_SELECT */
+#ifndef MHD_SUPPORT_POLL
+ case mhd_POLL_TYPE_POLL:
+#endif /* ! MHD_SUPPORT_POLL */
+ case mhd_POLL_TYPE_NOT_SET_YET:
+ default:
+ mhd_UNREACHABLE ();
+ }
+ return;
+}
+
+
/**
* Finish initialisation of events processing
* @param d the daemon object
@@ -1911,19 +2183,20 @@ allocate_events (struct MHD_Daemon *restrict d)
/**
* The number of elements to be monitored by sockets polling function
*/
- unsigned int num_elements;
- num_elements = 0;
+ unsigned int num_fd_elems;
+ num_fd_elems = 0;
#ifdef MHD_SUPPORT_THREADS
- ++num_elements; /* For the ITC */
+ ++num_fd_elems; /* For the ITC */
#endif
if (MHD_INVALID_SOCKET != d->net.listen.fd)
- ++num_elements; /* For the listening socket */
+ ++num_fd_elems; /* For the listening socket */
if (! mhd_D_HAS_THR_PER_CONN (d))
- num_elements += d->conns.cfg.count_limit;
+ num_fd_elems += d->conns.cfg.count_limit;
#endif /* MHD_SUPPORT_POLL || MHD_SUPPORT_EPOLL */
mhd_assert (0 != d->conns.cfg.count_limit);
mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
+ mhd_assert (! d->dbg.events_allocated);
mhd_DLINKEDL_INIT_LIST (&(d->events),proc_ready);
@@ -1963,9 +2236,9 @@ allocate_events (struct MHD_Daemon *restrict d)
}
free (d->events.data.select.rfds);
}
- mhd_LOG_MSG (d, MHD_SC_FD_SET_MEMORY_ALLOCATE_FAILURE, \
+ mhd_LOG_MSG (d, MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE, \
"Failed to allocate memory for fd_sets for the daemon");
- return MHD_SC_FD_SET_MEMORY_ALLOCATE_FAILURE;
+ return MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE;
break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
@@ -1973,20 +2246,20 @@ allocate_events (struct MHD_Daemon *restrict d)
/* The pointers have been set to NULL during pre-initialisations of the events */
mhd_assert (NULL == d->events.data.poll.fds);
mhd_assert (NULL == d->events.data.poll.rel);
- if ((num_elements > d->conns.cfg.count_limit) /* Check for value overflow */
+ if ((num_fd_elems > d->conns.cfg.count_limit) /* Check for value overflow */
|| (mhd_D_HAS_THR_PER_CONN (d)))
{
d->events.data.poll.fds =
- (struct pollfd *) malloc (sizeof(struct pollfd) * num_elements);
+ (struct pollfd *) malloc (sizeof(struct pollfd) * num_fd_elems);
if (NULL != d->events.data.poll.fds)
{
d->events.data.poll.rel =
(union mhd_SocketRelation *) malloc (sizeof(union mhd_SocketRelation)
- * num_elements);
+ * num_fd_elems);
if (NULL != d->events.data.poll.rel)
{
#ifndef NDEBUG
- d->dbg.num_events_elements = num_elements;
+ d->dbg.num_events_elements = num_fd_elems;
d->dbg.events_allocated = true;
#endif
return MHD_SC_OK; /* Success exit point */
@@ -1994,50 +2267,104 @@ allocate_events (struct MHD_Daemon *restrict d)
free (d->events.data.poll.fds);
}
}
- mhd_LOG_MSG (d, MHD_SC_POLL_FDS_MEMORY_ALLOCATE_FAILURE, \
+ mhd_LOG_MSG (d, MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE, \
"Failed to allocate memory for poll fds for the daemon");
- return MHD_SC_POLL_FDS_MEMORY_ALLOCATE_FAILURE;
+ return MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE;
break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
case mhd_POLL_TYPE_EPOLL:
- mhd_assert (! mhd_D_HAS_THR_PER_CONN (d));
- /* The event FD has been created during pre-initialisations of the events */
+ mhd_ASSUME (! mhd_D_HAS_THR_PER_CONN (d));
+ /* The event FD has been created by event FD initialisation */
mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd);
- /* The pointer has been set to NULL during pre-initialisations of the events */
+ mhd_assert (MHD_INVALID_SOCKET == d->events.data.epoll.early_fd);
+ /* The pointer has been set to NULL during pre-initialisation of the events */
mhd_assert (NULL == d->events.data.epoll.events);
mhd_assert (0 == d->events.data.epoll.num_elements);
- if ((num_elements > d->conns.cfg.count_limit) /* Check for value overflow */
- || (mhd_D_HAS_THR_PER_CONN (d)))
+ if (1)
{
const unsigned int upper_limit = (sizeof(void*) >= 8) ? 4096u : 1024u;
mhd_assert (0 < (int) upper_limit);
mhd_assert (upper_limit == (unsigned int) (size_t) upper_limit);
+ if (num_fd_elems < d->conns.cfg.count_limit) /* Check for value overflow */
+ num_fd_elems = upper_limit;
/* Trade neglectable performance penalty for memory saving */
/* Very large amount of new events processed in batches */
- if (num_elements > upper_limit)
- num_elements = upper_limit;
+ else if (num_fd_elems > upper_limit)
+ num_fd_elems = upper_limit;
d->events.data.epoll.events =
(struct epoll_event *) malloc (sizeof(struct epoll_event)
- * num_elements);
+ * num_fd_elems);
if (NULL != d->events.data.epoll.events)
{
- d->events.data.epoll.num_elements = num_elements;
+ d->events.data.epoll.num_elements = num_fd_elems;
#ifndef NDEBUG
- d->dbg.num_events_elements = num_elements;
+ d->dbg.num_events_elements = num_fd_elems;
d->dbg.events_allocated = true;
#endif
return MHD_SC_OK; /* Success exit point */
}
}
- mhd_LOG_MSG (d, MHD_SC_EPOLL_EVENTS_MEMORY_ALLOCATE_FAILURE, \
+ mhd_LOG_MSG (d, MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE, \
"Failed to allocate memory for epoll events for the daemon");
- return MHD_SC_EPOLL_EVENTS_MEMORY_ALLOCATE_FAILURE;
+ return MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE;
break;
#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ case mhd_POLL_TYPE_KQUEUE:
+ mhd_ASSUME (! mhd_D_HAS_THR_PER_CONN (d));
+ /* The event FD has been created by event FD initialisation */
+ mhd_assert (0 < d->events.data.kq.kq_fd);
+ /* The pointer has been set to NULL during pre-initialisation of the events */
+ mhd_assert (NULL == d->events.data.kq.kes);
+ mhd_assert (0u == d->events.data.kq.num_elements);
+ if (1)
+ {
+ const unsigned int upper_limit = (sizeof(void*) >= 8) ? 4096u : 1024u;
+ unsigned int num_event_elems = 0;
+
+ mhd_assert (0 < (int) upper_limit);
+ mhd_assert (upper_limit == (unsigned int) (size_t) upper_limit);
+
+#ifdef MHD_SUPPORT_THREADS
+ ++num_event_elems; /* For the ITC */
+#endif
+ if (MHD_INVALID_SOCKET != d->net.listen.fd)
+ ++num_event_elems; /* For the listening socket */
+
+ /* kqueue needs slots for individual combinations of FD + filter (send/recv) */
+ num_event_elems += 2 * d->conns.cfg.count_limit;
+ if (d->conns.cfg.count_limit > (num_event_elems / 2)) /* Check for overflow */
+ num_event_elems = upper_limit;
+ /* Trade neglectable performance penalty for memory saving */
+ /* Very large amount of new events processed in batches */
+ else if (upper_limit < num_event_elems)
+ num_event_elems = upper_limit;
+
+ /* Make sure that run-time overflow check is easy */
+ mhd_assert (((int) num_event_elems) > 0);
+ mhd_assert (((int) (num_event_elems + 1)) > 0);
+
+ d->events.data.kq.kes =
+ (struct kevent *) malloc (sizeof(struct kevent) * num_event_elems);
+ if (NULL != d->events.data.kq.kes)
+ {
+ d->events.data.kq.num_elements = num_event_elems;
+#ifndef NDEBUG
+ d->dbg.num_events_elements = num_event_elems;
+ d->dbg.events_allocated = true;
+#endif
+ return MHD_SC_OK; /* Success exit point */
+ }
+ }
+ mhd_LOG_MSG (d, MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE, \
+ "Failed to allocate memory for kqueue events for the daemon");
+ return MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE;
+ break;
+#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
@@ -2061,42 +2388,55 @@ allocate_events (struct MHD_Daemon *restrict d)
static MHD_FN_PAR_NONNULL_ (1) void
deallocate_events (struct MHD_Daemon *restrict d)
{
+ mhd_assert (d->dbg.events_allocated);
mhd_assert (0 != d->conns.cfg.count_limit);
mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
- if (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type)
+ switch (d->events.poll_type)
{
- mhd_assert (0 && "Wrong workflow");
- mhd_UNREACHABLE ();
- return;
- }
+ case mhd_POLL_TYPE_EXT:
+ break;
#ifdef MHD_SUPPORT_SELECT
- else if (mhd_POLL_TYPE_SELECT == d->events.poll_type)
- {
+ case mhd_POLL_TYPE_SELECT:
mhd_assert (NULL != d->events.data.select.efds);
mhd_assert (NULL != d->events.data.select.wfds);
mhd_assert (NULL != d->events.data.select.rfds);
free (d->events.data.select.efds);
free (d->events.data.select.wfds);
free (d->events.data.select.rfds);
- }
+ break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
- else if (mhd_POLL_TYPE_POLL == d->events.poll_type)
- {
+ case mhd_POLL_TYPE_POLL:
mhd_assert (NULL != d->events.data.poll.rel);
mhd_assert (NULL != d->events.data.poll.fds);
free (d->events.data.poll.rel);
free (d->events.data.poll.fds);
- }
+ break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
- else if (mhd_POLL_TYPE_EPOLL == d->events.poll_type)
- {
+ case mhd_POLL_TYPE_EPOLL:
mhd_assert (0 != d->events.data.epoll.num_elements);
mhd_assert (NULL != d->events.data.epoll.events);
free (d->events.data.epoll.events);
- }
+ break;
#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ case mhd_POLL_TYPE_KQUEUE:
+ mhd_assert (0 != d->events.data.kq.num_elements);
+ mhd_assert (NULL != d->events.data.kq.kes);
+ free (d->events.data.kq.kes);
+ break;
+#endif /* MHD_SUPPORT_KQUEUE */
+#ifndef MHD_SUPPORT_SELECT
+ case mhd_POLL_TYPE_SELECT:
+#endif /* ! MHD_SUPPORT_SELECT */
+#ifndef MHD_SUPPORT_POLL
+ case mhd_POLL_TYPE_POLL:
+#endif /* ! MHD_SUPPORT_POLL */
+ case mhd_POLL_TYPE_NOT_SET_YET:
+ default:
+ mhd_UNREACHABLE_D ("Wrong workflow");
+ }
#ifndef NDEBUG
d->dbg.events_allocated = false;
#endif
@@ -2305,6 +2645,7 @@ init_daemon_fds_monitoring (struct MHD_Daemon *restrict d)
#ifdef MHD_SUPPORT_EPOLL
case mhd_POLL_TYPE_EPOLL:
mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd);
+ mhd_assert (MHD_INVALID_SOCKET == d->events.data.epoll.early_fd);
mhd_assert (NULL != d->events.data.epoll.events);
mhd_assert (0 < d->events.data.epoll.num_elements);
if (1)
@@ -2316,9 +2657,9 @@ init_daemon_fds_monitoring (struct MHD_Daemon *restrict d)
if (0 != epoll_ctl (d->events.data.epoll.e_fd, EPOLL_CTL_ADD,
                     mhd_itc_r_fd (d->threading.itc), &reg_event))
{
- mhd_LOG_MSG (d, MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE, \
+ mhd_LOG_MSG (d, MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE, \
"Failed to add ITC FD to the epoll monitoring.");
- return MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE;
+ return MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE;
}
mhd_dbg_print_fd_mon_req ("ITC", \
mhd_itc_r_fd (d->threading.itc), \
@@ -2333,9 +2674,9 @@ init_daemon_fds_monitoring (struct MHD_Daemon *restrict d)
if (0 != epoll_ctl (d->events.data.epoll.e_fd, EPOLL_CTL_ADD,
                     d->net.listen.fd, &reg_event))
{
- mhd_LOG_MSG (d, MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE, \
+ mhd_LOG_MSG (d, MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE, \
"Failed to add listening FD to the epoll monitoring.");
- return MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE;
+ return MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE;
}
mhd_dbg_print_fd_mon_req ("lstn", \
d->net.listen.fd, \
@@ -2347,6 +2688,77 @@ init_daemon_fds_monitoring (struct MHD_Daemon *restrict d)
return MHD_SC_OK;
break;
#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ case mhd_POLL_TYPE_KQUEUE:
+ mhd_assert (0 < d->events.data.kq.kq_fd);
+ mhd_assert (NULL != d->events.data.kq.kes);
+ mhd_assert (2u < d->events.data.kq.num_elements);
+ if (1)
+ {
+ int num_elemnts;
+
+ num_elemnts = 0;
+#ifdef MHD_SUPPORT_THREADS
+ mhd_KE_SET (d->events.data.kq.kes + num_elemnts,
+ mhd_itc_r_fd (d->threading.itc),
+ EVFILT_READ,
+ EV_ADD, /* level trigger */
+ mhd_SOCKET_REL_PTRMARKER_ITC);
+ mhd_dbg_print_kevent_change ("ITC",
+ d->events.data.kq.kes + num_elemnts);
+ ++num_elemnts;
+#endif
+ if (MHD_INVALID_SOCKET != d->net.listen.fd)
+ {
+ mhd_KE_SET (d->events.data.kq.kes + num_elemnts,
+ d->net.listen.fd,
+ EVFILT_READ,
+ EV_ADD, /* level trigger */
+ mhd_SOCKET_REL_PTRMARKER_LISTEN);
+
+ mhd_dbg_print_kevent_change ("lstn",
+ d->events.data.kq.kes + num_elemnts);
+ ++num_elemnts;
+ }
+
+ if (0 != num_elemnts)
+ {
+ static const struct timespec zero_timeout = {0, 0};
+ int res;
+
+#ifdef MHD_USE_TRACE_POLLING_FDS
+ fprintf (stderr,
+ "### (Starting) kevent(%d, changes, %d, [NULL], "
+ "0, [0, 0])...\n",
+ d->events.data.kq.kq_fd,
+ num_elemnts);
+#endif /* MHD_USE_TRACE_POLLING_FDS */
+ res = kevent (d->events.data.kq.kq_fd,
+ d->events.data.kq.kes,
+ num_elemnts,
+ NULL,
+ 0,
+ &zero_timeout);
+#ifdef MHD_USE_TRACE_POLLING_FDS
+ fprintf (stderr,
+ "### (Finished) kevent(%d, changes, %d, [NULL], "
+ "0, [0, 0]) -> %d\n",
+ d->events.data.kq.kq_fd,
+ num_elemnts,
+ res);
+#endif /* MHD_USE_TRACE_POLLING_FDS */
+ if (0 != res)
+ {
+ mhd_LOG_MSG (d, MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE, \
+ "Failed to add ITC or listening FD to the "
+ "kqueue monitoring.");
+ return MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE;
+ }
+ }
+ }
+ return MHD_SC_OK;
+ break;
+#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
@@ -2412,6 +2824,13 @@ deinit_daemon_fds_monitoring (struct MHD_Daemon *restrict d)
return;
break;
#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ case mhd_POLL_TYPE_KQUEUE:
+ /* Nothing to do when using kqueue.
+ Monitoring stopped by closing kqueue FD. */
+ return;
+ break;
+#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
@@ -2531,39 +2950,45 @@ init_individual_thread_data_events_conns (struct MHD_Daemon *restrict d,
mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
mhd_assert (! d->dbg.connections_inited);
- res = allocate_events (d);
+ res = init_events_fd (d);
if (MHD_SC_OK != res)
return res;
- res = init_itc (d);
+ res = allocate_events (d);
if (MHD_SC_OK == res)
{
- res = init_daemon_fds_monitoring (d);
-
+ res = init_itc (d);
if (MHD_SC_OK == res)
{
+ res = init_daemon_fds_monitoring (d);
+
+ if (MHD_SC_OK == res)
+ {
#ifndef NDEBUG
- d->dbg.events_fully_inited = true;
+ d->dbg.events_fully_inited = true;
#endif
#ifdef MHD_SUPPORT_THREADS
- mhd_thread_handle_ID_set_invalid (&(d->threading.tid));
- d->threading.stop_requested = false;
+ mhd_thread_handle_ID_set_invalid (&(d->threading.tid));
+ d->threading.stop_requested = false;
#endif /* MHD_SUPPORT_THREADS */
#ifndef NDEBUG
- d->dbg.threading_inited = true;
+ d->dbg.threading_inited = true;
#endif
- res = init_individual_conns (d, s);
- if (MHD_SC_OK == res)
- return MHD_SC_OK;
+ res = init_individual_conns (d, s);
+ if (MHD_SC_OK == res)
+ return MHD_SC_OK;
- /* Below is a clean-up path */
+ /* Below is a clean-up path */
- deinit_daemon_fds_monitoring (d);
+ deinit_daemon_fds_monitoring (d);
+ }
+ deinit_itc (d);
}
- deinit_itc (d);
+ deallocate_events (d);
}
- deallocate_events (d);
+ deinit_events_fd (d);
+
mhd_assert (MHD_SC_OK != res);
return res;
}
@@ -2583,6 +3008,7 @@ deinit_individual_thread_data_events_conns (struct MHD_Daemon *restrict d)
deinit_daemon_fds_monitoring (d);
deinit_itc (d);
deallocate_events (d);
+ deinit_events_fd (d);
mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn));
mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->events),proc_ready));
#ifndef NDEBUG
@@ -2948,6 +3374,7 @@ set_d_threading_type (struct MHD_Daemon *restrict d)
mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type);
mhd_assert (! mhd_POLL_TYPE_INT_IS_EPOLL (d->events.poll_type));
+ mhd_assert (! mhd_POLL_TYPE_INT_IS_KQUEUE (d->events.poll_type));
d->threading.d_type = mhd_DAEMON_TYPE_LISTEN_ONLY;
return MHD_SC_OK;
case mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL:
@@ -2991,10 +3418,6 @@ deinit_workers_pool (struct MHD_Daemon *restrict d,
{ /* Note: loop exits after underflow of 'i' */
struct MHD_Daemon *const worker = d->threading.hier.pool.workers + i;
deinit_worker (worker);
-#ifdef MHD_SUPPORT_EPOLL
- if (mhd_POLL_TYPE_EPOLL == worker->events.poll_type)
- deinit_epoll (worker);
-#endif /* MHD_SUPPORT_EPOLL */
}
free (d->threading.hier.pool.workers);
#ifndef NDEBUG
@@ -3004,12 +3427,20 @@ deinit_workers_pool (struct MHD_Daemon *restrict d,
/**
- * Nullify worker daemon member that should be set only in master daemon
+ * Nullify worker daemon member that copied from master daemon but must not
+ * be used in worker
* @param d the daemon object
*/
static MHD_FN_PAR_NONNULL_ (1) void
reset_master_only_areas (struct MHD_Daemon *restrict d)
{
+#ifdef MHD_SUPPORT_EPOLL
+ /* In release builds this is done mostly for safety as @a early_fd is used
+ in workers only for asserts */
+ if (mhd_POLL_TYPE_EPOLL == d->events.poll_type)
+ d->events.data.epoll.early_fd = MHD_INVALID_SOCKET;
+#endif /* MHD_SUPPORT_EPOLL */
+
#ifdef MHD_SUPPORT_AUTH_DIGEST
memset (&(d->auth_dg.nonces_lock),
0x7F,
@@ -3094,35 +3525,17 @@ init_workers_pool (struct MHD_Daemon *restrict d,
if (conn_remainder > i)
worker->conns.cfg.count_limit++; /* Distribute the reminder */
#ifdef MHD_SUPPORT_EPOLL
- if (mhd_POLL_TYPE_EPOLL == worker->events.poll_type)
- {
- if (0 == i)
- {
- mhd_assert (0 <= d->events.data.epoll.e_fd);
- /* Move epoll control FD from the master daemon to the first worker */
- /* The FD has been copied by memcpy(). Clean-up the master daemon. */
- d->events.data.epoll.e_fd = MHD_INVALID_SOCKET;
- }
- else
- res = init_epoll (worker,
- true);
- }
-#endif /* MHD_SUPPORT_EPOLL */
- if (MHD_SC_OK == res)
- {
- res = init_worker (worker,
- s);
- if (MHD_SC_OK == res)
- continue; /* Process the next worker */
-
- /* Below is a clean-up of the current slot */
-
-#ifdef MHD_SUPPORT_EPOLL
- if (mhd_POLL_TYPE_EPOLL == worker->events.poll_type)
- deinit_epoll (worker);
+ mhd_assert ((mhd_POLL_TYPE_EPOLL != worker->events.poll_type) ||
+ (0 != i) ||
+ (0 < d->events.data.epoll.early_fd));
+ mhd_assert ((mhd_POLL_TYPE_EPOLL != worker->events.poll_type) ||
+ (0 == i) ||
+ (MHD_INVALID_SOCKET == d->events.data.epoll.early_fd));
#endif /* MHD_SUPPORT_EPOLL */
- }
- break;
+ res = init_worker (worker,
+ s);
+ if (MHD_SC_OK != res)
+ break;
}
if (num_workers == i)
{
diff --git a/src/mhd2/events_process.c b/src/mhd2/events_process.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2024 Evgeny Grin (Karlson2k)
+ Copyright (C) 2024-2026 Evgeny Grin (Karlson2k)
GNU libmicrohttpd is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -52,6 +52,7 @@
#if defined(MHD_USE_TRACE_SUSPEND_RESUME) || defined(MHD_USE_TRACE_POLLING_FDS)
# include <stdio.h>
+# include <string.h>
#endif /* MHD_USE_TRACE_SUSPEND_RESUME || MHD_USE_TRACE_POLLING_FDS */
#include "mhd_locks.h"
@@ -62,6 +63,7 @@
#ifdef MHD_SUPPORT_EPOLL
# include <sys/epoll.h>
#endif
+#include "sys_kqueue.h"
#ifdef MHD_SOCKETS_KIND_POSIX
# include "sys_errno.h"
#endif
@@ -72,6 +74,7 @@
#include "mhd_dbg_print.h"
#include "mhd_sockets_macros.h"
+#include "mhd_socket_error_funcs.h"
#include "mhd_daemon.h"
#include "mhd_connection.h"
@@ -158,9 +161,119 @@ dbg_print_fd_state_update (const char *fd_name,
}
+# ifdef MHD_SUPPORT_KQUEUE
+
+static const char *
+mhd_dbg_kefilter_to_name (const struct kevent *ke)
+{
+ switch (ke->filter)
+ {
+ case EVFILT_READ:
+ return "READ ";
+ case EVFILT_WRITE:
+ return "WRITE";
+ default:
+ break;
+ }
+ return "OTHER";
+}
+
+
+#define mhd_DBG_KEFLAGS_BUF_SIZE 512
+
+static void
+mdd_dbg_keflags_to_text (const struct kevent *ke,
+ char buf[mhd_DBG_KEFLAGS_BUF_SIZE])
+{
+ static const size_t buf_size = mhd_DBG_KEFLAGS_BUF_SIZE;
+ size_t len = 0u;
+ const unsigned int keflags = ke->flags;
+ unsigned int extra_flags;
+ buf[0] = '\0';
+
+ if (0 != (EV_ADD & keflags))
+ strcat (buf, "ADD|");
+ if (0 != (EV_ENABLE & keflags))
+ strcat (buf, "ENABLE|");
+ if (0 != (EV_DISABLE & keflags))
+ strcat (buf, "DISABLE|");
+ if (0 != (EV_DISPATCH & keflags))
+ strcat (buf, "DISPATCH|");
+ if (0 != (EV_DELETE & keflags))
+ strcat (buf, "DELETE|");
+ if (0 != (EV_RECEIPT & keflags))
+ strcat (buf, "RECEIPT|");
+ if (0 != (EV_ONESHOT & keflags))
+ strcat (buf, "ONESHOT|");
+ if (0 != (EV_CLEAR & keflags))
+ strcat (buf, "CLEAR|");
+ if (0 != (EV_EOF & keflags))
+ strcat (buf, "EOF|");
+ if (0 != (EV_ERROR & keflags))
+ strcat (buf, "ERROR|");
+#ifdef EV_KEEPUDATA
+ if (0 != (EV_KEEPUDATA & keflags))
+ strcat (buf, "KEEPUDATA|");
+#endif /* EV_KEEPUDATA */
+
+ len = strlen (buf);
+ mhd_assert (buf_size > len);
+
+ extra_flags =
+ (~((unsigned int) (EV_ADD | EV_ENABLE | EV_DISABLE | EV_DISPATCH | EV_DELETE
+ | EV_RECEIPT | EV_ONESHOT | EV_CLEAR | EV_EOF | EV_ERROR
+ | mhd_EV_KEEPUDATA_OR_ZERO))) & keflags;
+
+ if (0u != extra_flags)
+ {
+ (void) snprintf (buf + len,
+ buf_size - len,
+ "0x%02X|",
+ extra_flags);
+ len = strlen (buf);
+ mhd_assert (buf_size > len);
+ }
+
+ if (0u == len)
+ strcpy (buf, "0");
+ else
+ buf[len - 1u] = '\0'; /* Erase last '|' */
+}
+
+
+MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void
+mhd_dbg_print_kevent (const char *fd_name,
+ const struct kevent *ke,
+ bool update_req)
+{
+ char flags_txt[mhd_DBG_KEFLAGS_BUF_SIZE];
+ const char *action_name =
+ update_req ? "Update FD watching" : "FD state update";
+
+ mdd_dbg_keflags_to_text (ke,
+ flags_txt);
+
+ fprintf (stderr,
+ "### %s: %4s [%2llu]; filter: %s; flags: %s;\t"
+ "fflags: %u;\tdata %lld\n",
+ action_name,
+ fd_name,
+ (unsigned long long) ke->ident,
+ mhd_dbg_kefilter_to_name (ke),
+ flags_txt,
+ (unsigned int) ke->fflags,
+ (long long) ke->data);
+}
+
+
+# endif /* MHD_SUPPORT_KQUEUE */
+
#else /* ! MHD_USE_TRACE_POLLING_FDS */
# define dbg_print_fd_state_update(fd_n,fd,r_ready,w_ready,e_ready) \
((void) 0)
+# ifdef MHD_SUPPORT_KQUEUE
+# define mhd_dbg_print_kq_fd_mon_req(fd_name,ke)
+# endif /* MHD_SUPPORT_KQUEUE */
#endif /* ! MHD_USE_TRACE_POLLING_FDS */
#ifdef MHD_SUPPORT_THREADS
@@ -275,6 +388,21 @@ mhd_daemon_get_wait_max (const struct MHD_Daemon *restrict d)
#endif
return 0;
}
+#ifdef MHD_SUPPORT_KQUEUE
+ if (mhd_D_IS_USING_KQUEUE (d))
+ {
+ if ((NULL != mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn)) &&
+ ! mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn)->events.kq.monitored)
+ {
+#ifdef MHD_USE_TRACE_POLLING_FDS
+ fprintf (stderr,
+ "### mhd_daemon_get_wait_max(daemon) -> zero "
+ "(kqueue unmonitored connection(s) pending)\n");
+#endif
+ return 0;
+ }
+ }
+#endif /* MHD_SUPPORT_KQUEUE */
ret = mhd_daemon_get_wait_erliest_timeout (d);
@@ -1837,6 +1965,484 @@ get_all_net_updates_by_epoll (struct MHD_Daemon *restrict d)
#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+
+static MHD_FN_PAR_NONNULL_ALL_
+MHD_FN_PAR_IN_ (2) void
+kqueue_handle_missed_change (struct MHD_Daemon *restrict d,
+ const struct kevent *restrict upd_event)
+{
+ mhd_assert (mhd_D_IS_USING_KQUEUE (d));
+ mhd_ASSUME (mhd_SOCKET_REL_PTRMARKER_EMPTY != mhd_KE_GET_UDATA (upd_event));
+ mhd_ASSUME (mhd_SOCKET_REL_PTRMARKER_ITC != mhd_KE_GET_UDATA (upd_event));
+
+ if (mhd_SOCKET_REL_PTRMARKER_LISTEN == mhd_KE_GET_UDATA (upd_event))
+ {
+ return;
+ }
+ else
+ {
+ struct MHD_Connection *const restrict c =
+ (struct MHD_Connection *) mhd_KE_GET_UDATA (upd_event);
+
+ mhd_ASSUME (d == c->daemon);
+
+ mhd_conn_start_closing_no_sys_res (c);
+ mhd_conn_pre_clean (c);
+ mhd_conn_remove_from_daemon (c);
+ mhd_conn_close_final (c);
+ }
+}
+
+
+static MHD_FN_PAR_NONNULL_ALL_
+MHD_FN_PAR_IN_SIZE_ (2,3) void
+kqueue_handle_missed_changes (struct MHD_Daemon *restrict d,
+ struct kevent *restrict kes,
+ int num_elements)
+{
+ int i;
+
+ mhd_ASSUME (0 < num_elements);
+
+ for (i = 0; i < num_elements; ++i)
+ kqueue_handle_missed_change (d,
+ kes + i);
+}
+
+
+static MHD_FN_PAR_NONNULL_ALL_ MHD_FN_MUST_CHECK_RESULT_ int
+update_kqueue_monitoring (struct MHD_Daemon *restrict d)
+{
+ struct MHD_Connection *c;
+ struct kevent *restrict kes = d->events.data.kq.kes;
+ int num_updates;
+ const int max_changes = (int) d->events.data.kq.num_elements;
+
+ mhd_assert (mhd_D_IS_USING_KQUEUE (d));
+ mhd_assert (NULL != kes);
+ mhd_assert (2 <= max_changes);
+
+ num_updates = 0;
+
+ if (MHD_INVALID_SOCKET != d->net.listen.fd)
+ {
+ mhd_assert (! d->net.listen.is_broken);
+
+ mhd_KE_SET (kes + num_updates,
+ d->net.listen.fd,
+ EVFILT_READ,
+ (d->conns.block_new ? EV_DISABLE : EV_ENABLE)
+ | mhd_EV_KEEPUDATA_OR_ZERO,
+ mhd_SOCKET_REL_PTRMARKER_LISTEN);
+
+ mhd_dbg_print_kevent_change ("lstn",
+ kes + num_updates);
+ ++num_updates;
+ }
+
+ /* Process unmonitored connections starting from the earliest added
+ unmonitored connection */
+
+ c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
+
+ if ((NULL == c) || (c->events.kq.monitored))
+ return num_updates;
+
+ while (1)
+ {
+ struct MHD_Connection *const next_c = mhd_DLINKEDL_GET_NEXT (c,all_conn);
+ if (NULL == next_c)
+ break; /* Found the end of the list */
+ if (next_c->events.kq.monitored)
+ break; /* Found the earliest added unmonitored connection */
+ c = next_c;
+ }
+
+ mhd_ASSUME (NULL != c);
+ mhd_ASSUME (! c->events.kq.monitored);
+
+ for ((void) c; NULL != c; c = mhd_DLINKEDL_GET_PREV (c,all_conn))
+ {
+ mhd_ASSUME (! c->events.kq.monitored);
+
+ mhd_assert (! is_conn_excluded_from_http_comm (c));
+ mhd_assert (mhd_HTTP_STAGE_CLOSED != c->stage);
+
+ /* Check for the space for two filters */
+ if ((max_changes - 1) <= num_updates)
+ {
+ /* Too many updates for a single kevent() call */
+ static const struct timespec zero_timeout = {0, 0};
+ int res;
+
+#ifdef MHD_USE_TRACE_POLLING_FDS
+ fprintf (stderr,
+ "### (Starting) kevent(%d, changes, %d, [NULL], "
+ "0, [0, 0])...\n",
+ d->events.data.kq.kq_fd,
+ num_updates);
+#endif /* MHD_USE_TRACE_POLLING_FDS */
+ res = mhd_kevent (d->events.data.kq.kq_fd,
+ kes,
+ num_updates,
+ NULL,
+ 0,
+ &zero_timeout);
+#ifdef MHD_USE_TRACE_POLLING_FDS
+ fprintf (stderr,
+ "### (Finished) kevent(%d, changes, %d, [NULL], "
+ "0, [0, 0]) -> %d\n",
+ d->events.data.kq.kq_fd,
+ num_updates,
+ res);
+#endif /* MHD_USE_TRACE_POLLING_FDS */
+ if (0 > res)
+ {
+ if (EINTR != errno)
+ kqueue_handle_missed_changes (d,
+ kes,
+ num_updates);
+
+ }
+ num_updates = 0;
+ }
+
+ /* Add recv() filter */
+ mhd_KE_SET (kes + num_updates,
+ c->sk.fd,
+ EVFILT_READ,
+ EV_ADD | EV_CLEAR, /* 'EV_CLEAR' means edge trigger */
+ c);
+ mhd_dbg_print_kevent_change ("conn",
+ kes + num_updates);
+
+ ++num_updates;
+
+ /* Add send() filter */
+ mhd_KE_SET (kes + num_updates,
+ c->sk.fd,
+ EVFILT_WRITE,
+ EV_ADD | EV_CLEAR, /* 'EV_CLEAR' means edge trigger */
+ c);
+ mhd_dbg_print_kevent_change ("conn",
+ kes + num_updates);
+
+ ++num_updates;
+
+ c->events.kq.monitored = true;
+
+ mhd_assert (0 < num_updates);
+ }
+
+ mhd_assert (0 <= num_updates);
+ mhd_assert (num_updates <= (int) d->events.data.kq.num_elements);
+
+ return num_updates;
+}
+
+
+/**
+ * Map events provided by kqueue to connection states, ITC and
+ * listen socket states
+ */
+static MHD_FN_PAR_NONNULL_ (1) bool
+update_statuses_from_kevents (struct MHD_Daemon *restrict d,
+ unsigned int num_events)
+{
+ unsigned int i;
+ struct kevent *restrict kes = d->events.data.kq.kes;
+
+ mhd_assert (mhd_D_IS_USING_KQUEUE (d));
+
+ for (i = 0u; num_events > i; ++i)
+ {
+ struct kevent *const e = kes + i;
+ bool eof_ready;
+#ifdef MHD_SUPPORT_THREADS
+ if (mhd_SOCKET_REL_PTRMARKER_ITC == mhd_KE_GET_UDATA (e))
+ {
+ mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
+ mhd_assert (mhd_itc_r_fd (d->threading.itc) == (int) e->ident);
+ mhd_assert (EVFILT_READ == e->filter);
+ mhd_assert (0 == (e->flags & EV_ERROR));
+
+ eof_ready = (0 != (e->flags & EV_EOF));
+
+ mhd_dbg_print_kevent_report ("ITC",
+ e);
+
+ if (eof_ready)
+ {
+ log_itc_broken (d);
+ /* ITC is broken, need to stop the daemon thread now as otherwise
+ application will not be able to stop the thread. */
+ return false;
+ }
+ /* Clear ITC here, before other data processing.
+ Any external events will activate ITC again if additional data to
+ process is added externally. Clearing ITC early ensures that new data
+ (which followed by ITC activation) will not be missed. */
+ mhd_itc_clear (d->threading.itc);
+ }
+ else
+#endif /* MHD_SUPPORT_THREADS */
+ if (mhd_SOCKET_REL_PTRMARKER_LISTEN == mhd_KE_GET_UDATA (e))
+ {
+ bool listen_broken;
+ mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
+ mhd_assert (d->net.listen.fd == (int) e->ident);
+ mhd_assert (EVFILT_READ == e->filter);
+
+ eof_ready = (0 != (e->flags & EV_EOF));
+
+ mhd_dbg_print_kevent_report ("lstn",
+ e);
+
+ listen_broken = false;
+ if (eof_ready)
+ listen_broken = true;
+ else if ((0 != (e->flags & EV_ERROR)))
+ listen_broken = true;
+
+ if (listen_broken)
+ {
+ log_listen_broken (d);
+
+ /* Close the listening socket unless the master daemon should close it */
+ if (! mhd_D_HAS_MASTER (d))
+ mhd_socket_close (d->net.listen.fd);
+ else
+ {
+ static const struct timespec zero_timeout = {0, 0};
+ struct kevent remove_listen;
+ int res;
+
+ mhd_KE_SET (&remove_listen,
+ d->net.listen.fd,
+ EVFILT_READ,
+ EV_DELETE,
+ mhd_SOCKET_REL_PTRMARKER_LISTEN);
+
+#ifdef MHD_USE_TRACE_POLLING_FDS
+ fprintf (stderr,
+ "### (Starting) kevent(%d, changes, 1, [NULL], "
+ "0, [0, 0])...\n",
+ d->events.data.kq.kq_fd);
+#endif /* MHD_USE_TRACE_POLLING_FDS */
+ res = mhd_kevent (d->events.data.kq.kq_fd,
+ &remove_listen,
+ 1,
+ NULL,
+ 0,
+ &zero_timeout);
+#ifdef MHD_USE_TRACE_POLLING_FDS
+ fprintf (stderr,
+ "### (Finished) kevent(%d, changes, 1, [NULL], "
+ "0, [0, 0]) -> %d\n",
+ d->events.data.kq.kq_fd,
+ res);
+#endif /* MHD_USE_TRACE_POLLING_FDS */
+ /* Ignore possible error as the socket could be already removed
+ from the kqueue monitoring by closing the socket */
+ (void) res;
+ }
+
+ d->events.accept_pending = false;
+ d->net.listen.is_broken = true;
+ d->net.listen.fd = MHD_INVALID_SOCKET;
+ }
+ else
+ d->events.accept_pending = true;
+ }
+ else
+ {
+ bool err_ready;
+ bool recv_ready;
+ bool send_ready;
+ struct MHD_Connection *const restrict c =
+ (struct MHD_Connection *) mhd_KE_GET_UDATA (e);
+
+ mhd_ASSUME (d == c->daemon);
+ mhd_assert (c->events.kq.monitored);
+ mhd_ASSUME (mhd_SOCKET_REL_PTRMARKER_EMPTY != mhd_KE_GET_UDATA (e));
+
+ mhd_dbg_print_kevent_report ("conn",
+ e);
+
+ if ((0 != (e->flags & EV_ERROR)))
+ {
+ /* Error adding connection to monitoring */
+ kqueue_handle_missed_change (d,
+ e);
+
+ continue;
+ }
+
+ eof_ready = (0 != (e->flags & EV_EOF));
+ err_ready = (eof_ready && (0 != e->fflags));
+
+ if (err_ready)
+ {
+ c->sk.state.discnt_err =
+ mhd_socket_error_get_from_sys_err ((int) e->fflags);
+ mhd_assert (mhd_SOCKET_ERR_IS_HARD (c->sk.state.discnt_err));
+ }
+ /* This is a tricky processing as each "filter" updates only its own
+ side of the monitoring, not giving a picture of a complete socket
+ readiness. */
+
+ if (EVFILT_READ == e->filter)
+ {
+ recv_ready = true;
+ send_ready = mhd_SCKT_NET_ST_HAS_FLAG_SEND (c->sk.ready);
+ }
+ else
+ {
+ mhd_assert (EVFILT_WRITE == e->filter);
+ recv_ready = mhd_SCKT_NET_ST_HAS_FLAG_RECV (c->sk.ready);
+ send_ready = true;
+ }
+
+ update_conn_net_status (d,
+ c,
+ recv_ready,
+ send_ready,
+ err_ready
+ || mhd_SCKT_NET_ST_HAS_FLAG_ERROR (c->sk.ready));
+ }
+ }
+ return true;
+}
+
+
+/**
+ * Update states of all connections, check for connection pending
+ * to be accept()'ed, check for the events on ITC.
+ */
+static MHD_FN_PAR_NONNULL_ (1) bool
+get_all_net_updates_by_kqueue (struct MHD_Daemon *restrict d)
+{
+ int max_events;
+ int num_events;
+ int num_updates;
+ size_t events_processed;
+ uint_fast64_t max_wait;
+ struct timespec ke_timeout;
+
+ mhd_assert (mhd_D_IS_USING_KQUEUE (d));
+ mhd_assert (0 < d->events.data.kq.kq_fd);
+ mhd_assert (0 < (int) (d->events.data.kq.num_elements));
+ mhd_assert (0 != d->events.data.kq.num_elements);
+ mhd_assert (0 != d->conns.cfg.count_limit);
+ mhd_assert (d->events.data.kq.num_elements == d->dbg.num_events_elements);
+
+ num_updates = update_kqueue_monitoring (d);
+ mhd_ASSUME (0 <= num_updates);
+
+ /* Minimise amount of data passed from userspace to kernel and back */
+ max_events = (int) (d->conns.count * 2);
+#ifdef MHD_SUPPORT_THREADS
+ ++max_events;
+#endif /* MHD_SUPPORT_THREADS */
+ if (MHD_INVALID_SOCKET != d->net.listen.fd)
+ ++max_events;
+ /* Make sure that one extra slot used to clearly detect that all events
+ were gotten (if all provided slots are used then extra event could be
+ pending still). */
+ ++max_events;
+ if ((0 >= max_events) ||
+ (max_events > (int) d->events.data.kq.num_elements))
+ max_events = (int) d->events.data.kq.num_elements;
+
+ max_wait = mhd_daemon_get_wait_max (d);
+ ke_timeout.tv_sec = (time_t) (max_wait / 1000);
+ ke_timeout.tv_nsec = (long) ((max_wait % 1000) * 1000000L);
+ events_processed = 0;
+ do
+ {
+#ifdef MHD_USE_TRACE_POLLING_FDS
+ if (max_wait == MHD_WAIT_INDEFINITELY)
+ fprintf (stderr,
+ "### (Starting) kevent(%d, changes, %d, events, "
+ "%d, [NULL])...\n",
+ d->events.data.kq.kq_fd,
+ num_updates,
+ max_events);
+ else
+ fprintf (stderr,
+ "### (Starting) kevent(%d, changes, %d, events, "
+ "%d, [%llu, %llu])...\n",
+ d->events.data.kq.kq_fd,
+ num_updates,
+ max_events,
+ (unsigned long long) ke_timeout.tv_sec,
+ (unsigned long long) ke_timeout.tv_nsec);
+#endif /* MHD_USE_TRACE_POLLING_FDS */
+ num_events =
+ kevent (d->events.data.kq.kq_fd,
+ d->events.data.kq.kes,
+ num_updates,
+ d->events.data.kq.kes,
+ max_events,
+ (max_wait == MHD_WAIT_INDEFINITELY) ? NULL : &ke_timeout);
+#ifdef MHD_USE_TRACE_POLLING_FDS
+ if (max_wait == MHD_WAIT_INDEFINITELY)
+ fprintf (stderr,
+ "### (Finished) kevent(%d, changes, %d, events, "
+ "%d, [NULL]) -> %d\n",
+ d->events.data.kq.kq_fd,
+ num_updates,
+ max_events,
+ num_events);
+ else
+ fprintf (stderr,
+ "### (Finished) kevent(%d, changes, %d, events, "
+ "%d, [%llu, %llu]) -> %d\n",
+ d->events.data.kq.kq_fd,
+ num_updates,
+ max_events,
+ (unsigned long long) ke_timeout.tv_sec,
+ (unsigned long long) ke_timeout.tv_nsec,
+ num_events);
+#endif /* MHD_USE_TRACE_POLLING_FDS */
+
+ if (0 > num_events)
+ {
+ const int err = errno;
+ if (EINTR == err)
+ return true; /* EINTR, try next time */
+
+ mhd_LOG_MSG (d, MHD_SC_KQUEUE_HARD_ERROR, \
+ "The kevent() encountered unrecoverable error.");
+ return false;
+ }
+ if (! update_statuses_from_kevents (d,
+ (unsigned int) num_events))
+ return false;
+ if (max_events > num_events)
+ return true; /* All events have been read */
+
+ /* Use all slots for the next round(s) of getting events */
+ max_events = (int) d->events.data.kq.num_elements;
+ max_wait = 0; /* Do not block on the next getting events rounds */
+ ke_timeout.tv_sec = 0;
+ ke_timeout.tv_nsec = 0;
+
+ mhd_assert (0 < max_events);
+
+ /* If too many events are coming - process events that have been read already */
+ events_processed += (size_t) num_events;
+ } while ((events_processed < (d->conns.cfg.count_limit * 2))
+ || (events_processed < (d->conns.cfg.count_limit * 2) + 2));
+
+ return true;
+}
+
+
+#endif /* MHD_SUPPORT_KQUEUE */
+
+
/**
* Close timed-out connections (if any)
* @param d the daemon to use
@@ -1959,6 +2565,13 @@ process_all_events_and_data (struct MHD_Daemon *restrict d)
daemon_resume_conns_if_needed (d);
break;
#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ case mhd_POLL_TYPE_KQUEUE:
+ if (! get_all_net_updates_by_kqueue (d))
+ return false;
+ daemon_resume_conns_if_needed (d);
+ break;
+#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
diff --git a/src/mhd2/lib_get_info.c b/src/mhd2/lib_get_info.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2024-2025 Evgeny Grin (Karlson2k)
+ Copyright (C) 2024-2026 Evgeny Grin (Karlson2k)
GNU libmicrohttpd is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -351,6 +351,11 @@ MHD_lib_get_info_fixed_sz (enum MHD_LibInfoFixed info_type,
#else
output_buf->v_types_sockets_polling.tech_epoll = MHD_NO;
#endif
+#ifdef MHD_SUPPORT_KQUEUE
+ output_buf->v_types_sockets_polling.tech_kqueue = MHD_YES;
+#else
+ output_buf->v_types_sockets_polling.tech_kqueue = MHD_NO;
+#endif
return MHD_SC_OK;
case MHD_LIB_INFO_FIXED_SUPPORT_AGGREGATE_FD:
if (sizeof(output_buf->v_support_aggregate_fd_bool) > output_buf_size)
diff --git a/src/mhd2/mhd_connection.h b/src/mhd2/mhd_connection.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2014-2025 Evgeny Grin (Karlson2k)
+ Copyright (C) 2014-2026 Evgeny Grin (Karlson2k)
Copyright (C) 2007-2018 Daniel Pittman and Christian Grothoff
GNU libmicrohttpd is free software; you can redistribute it and/or
@@ -541,6 +541,18 @@ struct mhd_ConnExtrEvents
enum MHD_FdState reg_for;
};
+#ifdef MHD_SUPPORT_KQUEUE
+/**
+ * Connection's kqueue event data
+ */
+struct mhd_ConnKqEvents
+{
+ /**
+ * 'true' if connection has been added to kqueue monitoring
+ */
+ bool monitored;
+};
+#endif /* MHD_SUPPORT_KQUEUE */
/**
* The connection events data
*/
@@ -550,6 +562,13 @@ union mhd_ConnEvents
* The connection's external event data
*/
struct mhd_ConnExtrEvents extrn;
+
+#ifdef MHD_SUPPORT_KQUEUE
+ /**
+ * Connection's kqueue event data
+ */
+ struct mhd_ConnKqEvents kq;
+#endif /* MHD_SUPPORT_KQUEUE */
};
diff --git a/src/mhd2/mhd_daemon.h b/src/mhd2/mhd_daemon.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2024 Evgeny Grin (Karlson2k)
+ Copyright (C) 2024-2026 Evgeny Grin (Karlson2k)
GNU libmicrohttpd is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -76,6 +76,7 @@
#ifdef MHD_SUPPORT_EPOLL
# include <sys/epoll.h>
#endif
+#include "sys_kqueue.h"
#include "mempool_types.h"
@@ -248,6 +249,13 @@ enum MHD_FIXED_ENUM_ mhd_IntPollType
*/
mhd_POLL_TYPE_EPOLL = MHD_SPS_EPOLL
#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ ,
+ /**
+ * Use kqueue.
+ */
+ mhd_POLL_TYPE_KQUEUE = MHD_SPS_KQUEUE
+#endif /* MHD_SUPPORT_KQUEUE */
};
@@ -261,8 +269,19 @@ enum MHD_FIXED_ENUM_ mhd_IntPollType
# define mhd_POLL_TYPE_INT_IS_EPOLL(poll_type) (0)
#endif
+#ifdef MHD_SUPPORT_KQUEUE
+/**
+ * Check whether provided mhd_IntPollType value is "kqueue"
+ */
+# define mhd_POLL_TYPE_INT_IS_KQUEUE(poll_type) \
+ (mhd_POLL_TYPE_KQUEUE == (poll_type))
+#else
+# define mhd_POLL_TYPE_INT_IS_KQUEUE(poll_type) (0)
+#endif
+
#define mhd_POLL_TYPE_INT_IS_EDGE_TRIG(poll_type) \
- mhd_POLL_TYPE_INT_IS_EPOLL (poll_type)
+ (mhd_POLL_TYPE_INT_IS_EPOLL (poll_type) \
+ || mhd_POLL_TYPE_INT_IS_KQUEUE (poll_type))
#if defined(HAVE_UINTPTR_T)
typedef uintptr_t mhd_SockRelMarker;
@@ -376,15 +395,48 @@ struct mhd_DaemonEventsEPollData
* The array of events reported by epoll.
*/
struct epoll_event *events;
-
/**
* The number of elements in the allocated @a events arrays.
*/
size_t num_elements;
+ /**
+ * The epoll control FD, created by probing during early daemon
+ * initialisation.
+   * Temporary location.
+ * Must be moved to @a e_fd with events initialisation.
+ */
+ int early_fd;
};
#endif
+#ifdef MHD_SUPPORT_KQUEUE
+/**
+ * Daemon's parameters and pointers to the preallocated memory for running
+ * sockets monitoring by kqueue.
+ */
+struct mhd_DaemonEventsKQueueData
+{
+ /**
+ * The kqueue FD.
+ */
+ int kq_fd;
+
+ /**
+ * The array of kevents used for both registering filters and getting events
+ */
+ struct kevent *kes;
+
+ /**
+ * The number of elements in the allocated @a kes array.
+ *
+ * Note: kqueue API uses 'int' for the number of elements
+ */
+ unsigned int num_elements;
+};
+
+#endif /* MHD_SUPPORT_KQUEUE */
+
/**
* Daemon's data for external events callback.
* Internal version of struct MHD_WorkModeExternalEventLoopCBParam.
@@ -498,6 +550,14 @@ union mhd_DaemonEventMonitoringTypeSpecificData
struct mhd_DaemonEventsEPollData epoll;
#endif
+#ifdef MHD_SUPPORT_KQUEUE
+ /**
+ * Daemon's parameters and pointers to the preallocated memory for running
+ * sockets monitoring by kqueue.
+ */
+ struct mhd_DaemonEventsKQueueData kq;
+#endif
+
/**
* Daemon's data for external events for sockets monitoring.
*/
@@ -754,14 +814,6 @@ struct mhd_DaemonNetwork
* The listening socket
*/
struct mhd_ListenSocket listen;
-
-#ifdef MHD_SUPPORT_EPOLL
- /**
- * The epoll FD.
- * Set to '-1' when epoll is not used.
- */
- int epoll_fd;
-#endif
/**
* Configured settings for the daemon's network data
*/
@@ -912,7 +964,6 @@ enum MHD_FIXED_ENUM_ mhd_DaemonType
*/
#define mhd_D_TYPE_IS_INTERNAL_ONLY(t) \
(mhd_DAEMON_TYPE_WORKER == (t))
-
/**
* Check whether the daemon type is allowed to process the network data
*/
@@ -1286,6 +1337,7 @@ struct mhd_daemon_debug
bool net_deinited;
bool master_only_inited;
bool worker_only_inited;
+ bool events_fd_inited;
bool tls_inited;
bool events_allocated;
unsigned int num_events_elements;
@@ -1421,6 +1473,9 @@ struct MHD_Daemon
#define mhd_D_IS_USING_EPOLL(d) \
mhd_POLL_TYPE_INT_IS_EPOLL ((d)->events.poll_type)
+#define mhd_D_IS_USING_KQUEUE(d) \
+ mhd_POLL_TYPE_INT_IS_KQUEUE ((d)->events.poll_type)
+
#define mhd_D_POLL_IS_EDGE_TRIG(d) \
mhd_POLL_TYPE_INT_IS_EDGE_TRIG ((d)->events.poll_type)
diff --git a/src/mhd2/mhd_dbg_print.h b/src/mhd2/mhd_dbg_print.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2025 Evgeny Grin (Karlson2k)
+ Copyright (C) 2025-2026 Evgeny Grin (Karlson2k)
GNU libmicrohttpd is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -50,6 +50,7 @@
#ifdef MHD_USE_TRACE_POLLING_FDS
# include "sys_bool_type.h"
# include "mhd_socket_type.h"
+# include "sys_kqueue.h"
#endif /* MHD_USE_TRACE_POLLING_FDS */
@@ -89,7 +90,27 @@ MHD_FN_PAR_NONNULL_ALL_;
#else /* ! MHD_USE_TRACE_POLLING_FDS */
# define mhd_dbg_print_fd_mon_req(fd_n,fd,r_ready,w_ready,e_ready) ((void) 0)
+# ifdef MHD_SUPPORT_KQUEUE
+# define mhd_dbg_print_kevent(fd_name,ke,update_req) ((void) 0)
+# endif /* MHD_SUPPORT_KQUEUE */
#endif /* ! MHD_USE_TRACE_POLLING_FDS */
+#ifdef MHD_SUPPORT_KQUEUE
+/**
+ * Debug print kqueue event request update
+ * @param fd_name the name of FD ("ITC", "lstn" or "conn")
+ * @param ke the pointer to kevent
+ */
+# define mhd_dbg_print_kevent_change(fd_name,ke) \
+ mhd_dbg_print_kevent ((fd_name),(ke),true)
+
+/**
+ * Debug print kqueue event report
+ * @param fd_name the name of FD ("ITC", "lstn" or "conn")
+ * @param ke the pointer to kevent
+ */
+# define mhd_dbg_print_kevent_report(fd_name,ke) \
+ mhd_dbg_print_kevent ((fd_name),(ke),false)
+#endif /* MHD_SUPPORT_KQUEUE */
#endif /* ! MHD_DBG_PRINT_H */
diff --git a/src/mhd2/stream_funcs.c b/src/mhd2/stream_funcs.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2022-2024 Evgeny Grin (Karlson2k)
+ Copyright (C) 2022-2026 Evgeny Grin (Karlson2k)
GNU libmicrohttpd is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -52,11 +52,13 @@
#ifdef MHD_USE_TRACE_CONN_ADD_CLOSE
# include <stdio.h>
#endif /* MHD_USE_TRACE_CONN_ADD_CLOSE */
+#include "mhd_dbg_print.h"
#include <string.h>
#include "extr_events_funcs.h"
#ifdef MHD_SUPPORT_EPOLL
# include <sys/epoll.h>
#endif
+#include "sys_kqueue.h"
#include "sys_malloc.h"
#include "mhd_daemon.h"
@@ -1063,11 +1065,67 @@ mhd_conn_pre_clean_part1 (struct MHD_Connection *restrict c)
c->sk.fd,
&event))
{
- mhd_LOG_MSG (c->daemon, MHD_SC_EPOLL_CTL_REMOVE_FAILED,
+ mhd_LOG_MSG (c->daemon, MHD_SC_EVENTS_CONN_REMOVE_FAILED,
"Failed to remove connection socket from epoll.");
}
}
#endif /* MHD_SUPPORT_EPOLL */
+#ifdef MHD_SUPPORT_KQUEUE
+ else if (mhd_D_IS_USING_KQUEUE (c->daemon))
+ {
+# ifdef MHD_SUPPORT_UPGRADE
+ /* Remove socket from kqueue monitoring only if upgrading.
+ If connection is being closed, the socket is removed automatically
+ when the socket is closed. */
+ if (mhd_HTTP_STAGE_UPGRADING == c->stage)
+ {
+ static const struct timespec zero_timeout = {0, 0};
+ struct kevent events[2];
+ int res;
+
+ mhd_KE_SET (events + 0u,
+ c->sk.fd,
+ EVFILT_WRITE,
+ EV_DELETE,
+ c);
+ mhd_KE_SET (events + 1u,
+ c->sk.fd,
+ EVFILT_READ,
+ EV_DELETE,
+ c);
+
+# ifdef MHD_USE_TRACE_POLLING_FDS
+ fprintf (stderr,
+ "### (Starting) kevent(%d, changes, 2, [NULL], "
+ "0, [0, 0])...\n",
+ c->daemon->events.data.kq.kq_fd);
+# endif /* MHD_USE_TRACE_POLLING_FDS */
+ res = mhd_kevent (c->daemon->events.data.kq.kq_fd,
+ events,
+ 2,
+ NULL,
+ 0,
+ &zero_timeout);
+# ifdef MHD_USE_TRACE_POLLING_FDS
+ fprintf (stderr,
+ "### (Finished) kevent(%d, changes, 2, [NULL], "
+ "0, [0, 0]) -> %d\n",
+ c->daemon->events.data.kq.kq_fd,
+ res);
+# endif /* MHD_USE_TRACE_POLLING_FDS */
+ if (0 > res)
+ {
+ mhd_LOG_MSG (c->daemon, MHD_SC_EVENTS_CONN_REMOVE_FAILED,
+ "Failed to remove upgraded connection socket "
+ "from kqueue monitoring.");
+        /* Continue with the still-monitored socket, which may wake up
+           the daemon's monitoring */
+ }
+ }
+# endif /* MHD_SUPPORT_UPGRADE */
+ (void) 0;
+ }
+#endif /* MHD_SUPPORT_KQUEUE */
}
diff --git a/src/mhd2/sys_kqueue.h b/src/mhd2/sys_kqueue.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
+/*
+ This file is part of GNU libmicrohttpd.
+ Copyright (C) 2026 Evgeny Grin (Karlson2k)
+
+ GNU libmicrohttpd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ GNU libmicrohttpd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ Alternatively, you can redistribute GNU libmicrohttpd and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of
+ the License, or (at your option) any later version, together
+ with the eCos exception, as follows:
+
+ As a special exception, if other files instantiate templates or
+ use macros or inline functions from this file, or you compile this
+ file and link it with other works to produce a work based on this
+ file, this file does not by itself cause the resulting work to be
+ covered by the GNU General Public License. However the source code
+ for this file must still be made available in accordance with
+ section (3) of the GNU General Public License v2.
+
+ This exception does not invalidate any other reasons why a work
+ based on this file might be covered by the GNU General Public
+ License.
+
+ You should have received copies of the GNU Lesser General Public
+ License and the GNU General Public License along with this library;
+ if not, see <https://www.gnu.org/licenses/>.
+*/
+
+/**
+ * @file src/mhd2/sys_kqueue.h
+ * @brief The header for the system kqueue functions and related data types
+ * @author Karlson2k (Evgeny Grin)
+ *
+ * This header includes system macros for kqueue and defines related
+ * MHD macros.
+ */
+
+#ifndef MHD_SYS_KQUEUE_H
+#define MHD_SYS_KQUEUE_H 1
+
+#include "mhd_sys_options.h"
+
+#ifdef MHD_SUPPORT_KQUEUE
+# include "mhd_socket_type.h"
+# ifndef MHD_SOCKETS_KIND_POSIX
+#  error Only POSIX type sockets are supported
+# endif
+# ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+# endif /* HAVE_SYS_TYPES_H */
+
+# ifdef HAVE_SYS_TIME_H
+# include <sys/time.h>
+# endif
+
+# include <sys/event.h>
+
+# ifdef HAVE_KQUEUEX
+# ifdef KQUEUE_CLOEXEC
+# define mhd_kqueue() kqueuex (KQUEUE_CLOEXEC)
+# define mhd_KQUEUE_HAS_CLOEXEC_SET() (! 0)
+# else
+# undef HAVE_KQUEUEX /* No use for kqueuex() */
+# endif
+# endif
+
+# ifdef HAVE_KQUEUE1
+# ifdef mhd_kqueue
+# undef HAVE_KQUEUE1 /* No use for kqueue1() */
+# else
+# include <fcntl.h>
+# ifdef O_CLOEXEC
+# define mhd_kqueue() kqueue1 (O_CLOEXEC)
+# define mhd_KQUEUE_HAS_CLOEXEC_SET() (! 0)
+# else
+# undef HAVE_KQUEUE1
+# endif
+# endif
+# endif
+
+# ifndef mhd_kqueue
+# define mhd_kqueue() kqueue ()
+# define mhd_KQUEUE_HAS_CLOEXEC_SET() (0)
+# endif
+
+# ifdef __NetBSD__
+# include <sys/param.h>
+# if __NetBSD_Version__ + 0 < 1000000000
+# define mhd_KE_UDATA_IS_INTPTR 1
+# endif
+# endif
+
+# ifndef mhd_KE_UDATA_IS_INTPTR
+typedef void *mhd_KE_UDATA_TYPE;
+# define mhd_PTR_TO_KE_UDATA(ptr) (ptr)
+# define mhd_KE_UDATA_TO_PTR(ud) (ud)
+# else
+typedef intptr_t mhd_KE_UDATA_TYPE;
+# define mhd_PTR_TO_KE_UDATA(ptr) ((mhd_KE_UDATA_TYPE) (ptr))
+# define mhd_KE_UDATA_TO_PTR(ud) ((void*) (ud))
+# endif
+
+# define mhd_KE_GET_UDATA(ev_ptr) mhd_KE_UDATA_TO_PTR ((ev_ptr)->udata)
+
+# define mhd_KE_SET(ev_ptr,fd,evfltr,evflags,evudata_ptr) do { \
+ struct kevent mhd__ke_tmp = {0u}; \
+ mhd__ke_tmp.ident = (unsigned int) (fd); \
+ mhd__ke_tmp.filter = (evfltr); \
+ mhd__ke_tmp.flags = (evflags); \
+ mhd__ke_tmp.udata = mhd_PTR_TO_KE_UDATA ((evudata_ptr)); \
+ (*(ev_ptr)) = mhd__ke_tmp; } while (0)
+
+# ifdef EV_KEEPUDATA
+# define mhd_EV_KEEPUDATA_OR_ZERO EV_KEEPUDATA
+# else
+# define mhd_EV_KEEPUDATA_OR_ZERO (0)
+# endif
+
+# ifndef __NetBSD__
+# define mhd_kevent(kqfd,chlist,nchs,evlist,nevs,tmout) \
+ kevent ((kqfd),(chlist),(nchs),(evlist),(nevs),(tmout))
+# else /* ! __NetBSD__ */
+# define mhd_kevent(kqfd,chlist,nchs,evlist,nevs,tmout) \
+ kevent ((kqfd),(chlist),(size_t) (nchs),(evlist),(size_t) (nevs), \
+ (tmout))
+# endif
+
+#endif /* MHD_SUPPORT_KQUEUE */
+
+#endif /* ! MHD_SYS_KQUEUE_H */
diff --git a/src/tools/perf_replies.c b/src/tools/perf_replies.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
This file is part of GNU libmicrohttpd.
- Copyright (C) 2023-2025 Evgeny Grin (Karlson2k)
+ Copyright (C) 2023-2026 Evgeny Grin (Karlson2k)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -1854,6 +1854,8 @@ get_mhd_poll_func_name (struct MHD_Daemon *d)
return "poll()";
case MHD_SPS_EPOLL:
return "epoll";
+ case MHD_SPS_KQUEUE:
+ return "kqueue";
case MHD_SPS_AUTO:
default:
break;