daemon_start.c (135527B)
1 /* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */ 2 /* 3 This file is part of GNU libmicrohttpd. 4 Copyright (C) 2024-2026 Evgeny Grin (Karlson2k) 5 6 GNU libmicrohttpd is free software; you can redistribute it and/or 7 modify it under the terms of the GNU Lesser General Public 8 License as published by the Free Software Foundation; either 9 version 2.1 of the License, or (at your option) any later version. 10 11 GNU libmicrohttpd is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 Lesser General Public License for more details. 15 16 Alternatively, you can redistribute GNU libmicrohttpd and/or 17 modify it under the terms of the GNU General Public License as 18 published by the Free Software Foundation; either version 2 of 19 the License, or (at your option) any later version, together 20 with the eCos exception, as follows: 21 22 As a special exception, if other files instantiate templates or 23 use macros or inline functions from this file, or you compile this 24 file and link it with other works to produce a work based on this 25 file, this file does not by itself cause the resulting work to be 26 covered by the GNU General Public License. However the source code 27 for this file must still be made available in accordance with 28 section (3) of the GNU General Public License v2. 29 30 This exception does not invalidate any other reasons why a work 31 based on this file might be covered by the GNU General Public 32 License. 33 34 You should have received copies of the GNU Lesser General Public 35 License and the GNU General Public License along with this library; 36 if not, see <https://www.gnu.org/licenses/>. 
37 */ 38 39 /** 40 * @file src/mhd2/daemon_start.c 41 * @brief The implementation of the MHD_daemon_start() 42 * @author Karlson2k (Evgeny Grin) 43 */ 44 45 #include "mhd_sys_options.h" 46 47 #include "mhd_assert.h" 48 #include "mhd_unreachable.h" 49 #include "mhd_assume.h" 50 51 #include "mhd_constexpr.h" 52 53 #include "sys_bool_type.h" 54 #include "sys_base_types.h" 55 #include "sys_malloc.h" 56 #include "compat_calloc.h" 57 58 #include <string.h> 59 #include "sys_sockets_types.h" 60 #include "sys_sockets_headers.h" 61 #include "mhd_sockets_macros.h" 62 #include "sys_ip_headers.h" 63 64 #include "mhd_atomic_counter.h" 65 66 #ifdef MHD_SOCKETS_KIND_POSIX 67 # include "sys_errno.h" 68 #endif 69 #include "sys_select.h" 70 #include "sys_poll.h" 71 #ifdef MHD_SUPPORT_EPOLL 72 # include <sys/epoll.h> 73 #endif 74 #include "sys_kqueue.h" 75 76 #ifdef MHD_SOCKETS_KIND_POSIX 77 # include <fcntl.h> 78 #endif 79 80 #include "extr_events_funcs.h" 81 82 #include "mhd_dbg_print.h" 83 84 #include "mhd_limits.h" 85 86 #include "mhd_daemon.h" 87 #include "daemon_options.h" 88 89 #include "mhd_sockets_funcs.h" 90 91 #include "mhd_lib_init.h" 92 #include "daemon_logger.h" 93 94 #ifdef MHD_SUPPORT_HTTPS 95 # include "mhd_tls_common.h" 96 # include "mhd_tls_funcs.h" 97 #endif 98 99 #include "events_process.h" 100 101 #ifdef MHD_SUPPORT_THREADS 102 # include "mhd_itc.h" 103 # include "mhd_threads.h" 104 # include "daemon_funcs.h" 105 #endif 106 107 #include "mhd_public_api.h" 108 109 110 /** 111 * The default value for fastopen queue length (currently GNU/Linux only) 112 */ 113 #define MHD_TCP_FASTOPEN_DEF_QUEUE_LEN 64 114 115 /** 116 * Release any internally allocated pointers, then deallocate the settings. 
117 * @param s the pointer to the settings to release 118 */ 119 static void 120 dsettings_release (struct DaemonOptions *s) 121 { 122 /* Release starting from the last member */ 123 if (NULL != s->random_entropy.v_buf) 124 free (s->random_entropy.v_buf); 125 if (MHD_INVALID_SOCKET != s->listen_socket) 126 mhd_socket_close (s->listen_socket); 127 if (NULL != s->bind_sa.v_sa) 128 free (s->bind_sa.v_sa); 129 if (NULL != s->tls_cert_key.v_mem_cert) 130 free (s->tls_cert_key.v_mem_cert); 131 free (s); 132 } 133 134 135 /** 136 * Set basic daemon parameters that not require additional initialisation. 137 * Mostly copy such parameters from the settings object to the daemon object. 138 * @param d the daemon object 139 * @param s the user settings 140 * @return MHD_SC_OK on success, 141 * the error code otherwise 142 */ 143 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 144 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 145 daemon_set_basic_settings (struct MHD_Daemon *restrict d, 146 struct DaemonOptions *restrict s) 147 { 148 mhd_constexpr uint_fast64_t max_timeout_ms_value = 1209600000u; 149 150 #ifdef MHD_SUPPORT_HTTP2 151 // TODO: make it configurable 152 d->http_cfg.http1x = true; 153 d->http_cfg.http2 = true; 154 #endif /* MHD_SUPPORT_HTTP2 */ 155 156 d->req_cfg.strictness = s->protocol_strict_level.v_sl; 157 158 #ifdef MHD_SUPPORT_COOKIES 159 d->req_cfg.disable_cookies = (MHD_NO != s->disable_cookies); 160 #endif 161 162 d->req_cfg.suppress_date = (MHD_NO != s->suppress_date_header); 163 164 d->conns.cfg.timeout_milsec = s->default_timeout_milsec; 165 if (max_timeout_ms_value < d->conns.cfg.timeout_milsec) 166 d->conns.cfg.timeout_milsec = max_timeout_ms_value; 167 168 d->conns.cfg.per_ip_limit = s->per_ip_limit; 169 170 return MHD_SC_OK; 171 } 172 173 174 /** 175 * Set the daemon work mode. 
 * This function also checks whether requested work mode is supported by
 * current build and whether work mode is compatible with requested events
 * polling technique.
 * @param d the daemon object
 * @param s the user settings
 * @return MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_set_work_mode (struct MHD_Daemon *restrict d,
                      struct DaemonOptions *restrict s)
{
  switch (s->work_mode.mode)
  {
  case MHD_WM_EXTERNAL_PERIODIC:
    d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_NO_THREADS;
    break;
  case MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL:
  case MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE:
    /* In external event-loop modes the application polls the sockets,
       so a specific polling syscall cannot be honoured. */
    if (MHD_SPS_AUTO != s->poll_syscall)
    {
      mhd_LOG_MSG ( \
        d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \
        "The requested work mode is not compatible with setting " \
        "socket polling syscall.");
      return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
    }
    if (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL == s->work_mode.mode)
      d->wmode_int = mhd_WM_INT_EXTERNAL_EVENTS_LEVEL;
    else
      d->wmode_int = mhd_WM_INT_EXTERNAL_EVENTS_EDGE;
    break;
  case MHD_WM_EXTERNAL_SINGLE_FD_WATCH:
    /* Single-FD watch needs a polling facility that itself exposes a
       watchable FD: only epoll or kqueue qualify. */
    if ((MHD_SPS_AUTO != s->poll_syscall) &&
        (MHD_SPS_EPOLL != s->poll_syscall) &&
        (MHD_SPS_KQUEUE != s->poll_syscall))
    {
      mhd_LOG_MSG ( \
        d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \
        "The requested work mode MHD_WM_EXTERNAL_SINGLE_FD_WATCH " \
        "is not compatible with requested socket polling syscall.");
      return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
    }
#if ! defined(MHD_SUPPORT_EPOLL) && ! defined(MHD_SUPPORT_KQUEUE)
    mhd_LOG_MSG ( \
      d, MHD_SC_FEATURE_DISABLED, \
      "The epoll or kqueue is required for the requested work mode " \
      "MHD_WM_EXTERNAL_SINGLE_FD_WATCH, but none is available on this " \
      "platform or MHD build.");
    return MHD_SC_FEATURE_DISABLED;
#else
    /* NOTE(review): maps to the same internal mode as
       MHD_WM_EXTERNAL_PERIODIC (events processed internally without
       threads) — presumably intended, as the application only watches
       the aggregate FD; confirm against the events processing code. */
    d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_NO_THREADS;
#endif
    break;
  case MHD_WM_THREAD_PER_CONNECTION:
    /* Thread-per-connection polls only a couple of FDs per thread, so
       the scalable epoll/kqueue backends are rejected here. */
    if (MHD_SPS_EPOLL == s->poll_syscall)
    {
      mhd_LOG_MSG ( \
        d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \
        "The requested work mode MHD_WM_THREAD_PER_CONNECTION " \
        "is not compatible with 'epoll' sockets polling.");
      return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
    }
    if (MHD_SPS_KQUEUE == s->poll_syscall)
    {
      mhd_LOG_MSG ( \
        d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \
        "The requested work mode MHD_WM_THREAD_PER_CONNECTION " \
        "is not compatible with 'kqueue' sockets polling.");
      return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
    }
    mhd_FALLTHROUGH;
  /* Intentional fallthrough */
  case MHD_WM_WORKER_THREADS:
#ifndef MHD_SUPPORT_THREADS
    mhd_LOG_MSG (d, MHD_SC_FEATURE_DISABLED, \
                 "The internal threads modes are not supported by this " \
                 "build of MHD.");
    return MHD_SC_FEATURE_DISABLED;
#else /* MHD_SUPPORT_THREADS */
    if (MHD_WM_THREAD_PER_CONNECTION == s->work_mode.mode)
      d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION;
    else if (1 >= s->work_mode.params.num_worker_threads) /* && (MHD_WM_WORKER_THREADS == s->work_mode.mode) */
      d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_ONE_THREAD;
    else
      d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL;
#endif /* MHD_SUPPORT_THREADS */
    break;
  default:
    mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_UNEXPECTED_WM, \
                 "Wrong requested work mode.");
    return MHD_SC_CONFIGURATION_UNEXPECTED_WM;
  }

  /* MHD_D_O_REREGISTER_ALL is meaningful only when the application owns
     the event loop (external events modes). */
  if ((mhd_WM_INT_EXTERNAL_EVENTS_LEVEL != d->wmode_int) &&
      (mhd_WM_INT_EXTERNAL_EVENTS_EDGE != d->wmode_int) &&
      (MHD_NO != s->reregister_all))
  {
    mhd_LOG_MSG ( \
      d, \
      MHD_SC_EXTERNAL_EVENT_ONLY, \
      "The MHD_D_O_REREGISTER_ALL option can be used only with external " \
      "events work modes.");
    return MHD_SC_EXTERNAL_EVENT_ONLY;
  }

  return MHD_SC_OK;
}


/**
 * Any sockaddr flavour used by the listen socket code, overlaid in one
 * union so a single local buffer fits every supported address family.
 */
union mhd_SockaddrAny
{
  struct sockaddr sa;
  struct sockaddr_in sa_i4;
#ifdef HAVE_INET6
  struct sockaddr_in6 sa_i6;
#endif /* HAVE_INET6 */
  struct sockaddr_storage sa_stor;
};


/**
 * The type of the socket to create.
 * Non-negative values intentionally coincide with the corresponding
 * public MHD_AF_* values; negative values and the fallback value are
 * internal-only states.
 */
enum mhd_CreateSktType
{
  /**
   * Unknown address family (could be IP or not IP)
   */
  mhd_SKT_UNKNOWN = -4
  ,
  /**
   * The socket is not IP.
   */
  mhd_SKT_NON_IP = -2
  ,
  /**
   * The socket is UNIX.
   */
  mhd_SKT_UNIX = -1
  ,
  /**
   * No socket
   */
  mhd_SKT_NO_SOCKET = MHD_AF_NONE
  ,
  /**
   * IPv4 only
   */
  mhd_SKT_IP_V4_ONLY = MHD_AF_INET4
  ,
  /**
   * IPv6 only
   */
  mhd_SKT_IP_V6_ONLY = MHD_AF_INET6
  ,
  /**
   * IPv6 with dual stack enabled
   */
  mhd_SKT_IP_DUAL_REQUIRED = MHD_AF_DUAL
  ,
  /**
   * Try IPv6 with dual stack then IPv4
   */
  mhd_SKT_IP_V4_WITH_V6_OPT = MHD_AF_DUAL_v6_OPTIONAL
  ,
  /**
   * IPv6 with optional dual stack
   */
  mhd_SKT_IP_V6_WITH_V4_OPT = MHD_AF_DUAL_v4_OPTIONAL
  ,
  /**
   * Try IPv4 then IPv6 with optional dual stack
   */
  mhd_SKT_IP_V4_WITH_FALLBACK = 16
};

/**
 * Create socket, bind to the address and start listening on the socket.
 *
 * The socket is assigned to the daemon as listening FD.
358 * @param d the daemon to use 359 * @param s the user settings 360 * @param v6_tried true if IPv6 has been tried already 361 * @param force_v6_any_dual true if IPv6 is forced with dual stack either 362 * enabled or not 363 * @param prev_bnd_lstn_err if this function was already tried with another and 364 * failed to bind or to start listening then 365 * this parameter must be set to respecting status 366 * code, otherwise this parameter must be #MHD_SC_OK 367 * @return #MHD_SC_OK on success, 368 * the error code otherwise (no error printed to log if result is 369 * #MHD_SC_LISTEN_SOCKET_BIND_FAILED or #MHD_SC_LISTEN_FAILURE) 370 */ 371 static enum MHD_StatusCode 372 create_bind_listen_stream_socket_inner (struct MHD_Daemon *restrict d, 373 struct DaemonOptions *restrict s, 374 bool v6_tried, 375 bool force_v6_any_dual, 376 enum MHD_StatusCode prev_bnd_lstn_err) 377 { 378 MHD_Socket sk; 379 enum mhd_CreateSktType sk_type; 380 bool sk_already_listening; 381 union mhd_SockaddrAny sa_all; 382 const struct sockaddr *p_use_sa; 383 socklen_t use_sa_size; 384 uint_least16_t sk_port; 385 bool is_non_block; 386 bool is_non_inhr; 387 enum MHD_StatusCode ret; 388 389 sk = MHD_INVALID_SOCKET; 390 sk_type = mhd_SKT_NO_SOCKET; 391 sk_already_listening = false; 392 p_use_sa = NULL; 393 use_sa_size = 0; 394 sk_port = 0; 395 396 #ifndef HAVE_INET6 397 mhd_assert (! v6_tried); 398 mhd_assert (! force_v6_any_dual); 399 #endif 400 mhd_assert (mhd_SKT_NO_SOCKET == sk_type); /* Mute analyser warning */ 401 402 if (MHD_INVALID_SOCKET != s->listen_socket) 403 { 404 mhd_assert (! v6_tried); 405 mhd_assert (! 
force_v6_any_dual); 406 /* Check for options conflicts */ 407 if (0 != s->bind_sa.v_sa_len) 408 { 409 mhd_LOG_MSG (d, MHD_SC_OPTIONS_CONFLICT, \ 410 "MHD_D_O_BIND_SA cannot be used together " \ 411 "with MHD_D_O_LISTEN_SOCKET"); 412 return MHD_SC_OPTIONS_CONFLICT; 413 } 414 else if (MHD_AF_NONE != s->bind_port.v_af) 415 { 416 mhd_LOG_MSG (d, MHD_SC_OPTIONS_CONFLICT, \ 417 "MHD_D_O_BIND_PORT cannot be used together " \ 418 "with MHD_D_O_LISTEN_SOCKET"); 419 return MHD_SC_OPTIONS_CONFLICT; 420 } 421 422 /* No options conflicts */ 423 sk = s->listen_socket; 424 s->listen_socket = MHD_INVALID_SOCKET; /* Prevent closing with settings cleanup */ 425 sk_type = mhd_SKT_UNKNOWN; 426 sk_already_listening = true; 427 } 428 else if ((0 != s->bind_sa.v_sa_len) || (MHD_AF_NONE != s->bind_port.v_af)) 429 { 430 if (0 != s->bind_sa.v_sa_len) 431 { 432 mhd_assert (! v6_tried); 433 mhd_assert (! force_v6_any_dual); 434 435 /* Check for options conflicts */ 436 if (MHD_AF_NONE != s->bind_port.v_af) 437 { 438 mhd_LOG_MSG (d, MHD_SC_OPTIONS_CONFLICT, \ 439 "MHD_D_O_BIND_SA cannot be used together " \ 440 "with MHD_D_O_BIND_PORT"); 441 return MHD_SC_OPTIONS_CONFLICT; 442 } 443 444 /* No options conflicts */ 445 switch (s->bind_sa.v_sa->sa_family) 446 { 447 case AF_INET: 448 sk_type = mhd_SKT_IP_V4_ONLY; 449 if (sizeof(sa_all.sa_i4) > s->bind_sa.v_sa_len) 450 { 451 mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_WRONG_SA_SIZE, \ 452 "The size of the provided sockaddr does not match " 453 "used address family"); 454 return MHD_SC_CONFIGURATION_WRONG_SA_SIZE; 455 } 456 memcpy (&(sa_all.sa_i4), s->bind_sa.v_sa, sizeof(sa_all.sa_i4)); 457 sk_port = (uint_least16_t) ntohs (sa_all.sa_i4.sin_port); 458 #ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN 459 mhd_assert (sizeof(sa_all.sa_i4) == (uint8_t) sizeof(sa_all.sa_i4)); 460 sa_all.sa_i4.sin_len = (uint8_t) sizeof(sa_all.sa_i4); 461 #endif 462 p_use_sa = (struct sockaddr *) &(sa_all.sa_i4); 463 use_sa_size = (socklen_t) sizeof(sa_all.sa_i4); 464 break; 465 #ifdef 
HAVE_INET6 466 case AF_INET6: 467 sk_type = mhd_SKT_IP_V6_ONLY; 468 if (sizeof(sa_all.sa_i6) > s->bind_sa.v_sa_len) 469 { 470 mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_WRONG_SA_SIZE, \ 471 "The size of the provided sockaddr does not match " 472 "used address family"); 473 return MHD_SC_CONFIGURATION_WRONG_SA_SIZE; 474 } 475 memcpy (&(sa_all.sa_i6), s->bind_sa.v_sa, s->bind_sa.v_sa_len); 476 sk_port = (uint_least16_t) ntohs (sa_all.sa_i6.sin6_port); 477 #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN 478 mhd_assert (sizeof(sa_all.sa_i6) == (uint8_t) sizeof(sa_all.sa_i6)); 479 sa_all.sa_i6.sin6_len = (uint8_t) sizeof(sa_all.sa_i6); 480 #endif 481 p_use_sa = (struct sockaddr *) &(sa_all.sa_i6); 482 use_sa_size = (socklen_t) sizeof(sa_all.sa_i6); 483 break; 484 #endif /* HAVE_INET6 */ 485 #ifdef MHD_AF_UNIX 486 case MHD_AF_UNIX: 487 sk_type = mhd_SKT_UNIX; 488 p_use_sa = NULL; /* To be set below */ 489 break; 490 #endif /* MHD_AF_UNIX */ 491 default: 492 sk_type = mhd_SKT_UNKNOWN; 493 p_use_sa = NULL; /* To be set below */ 494 break; 495 } 496 497 if (s->bind_sa.v_dual) 498 { 499 if (mhd_SKT_IP_V6_ONLY != sk_type) 500 { 501 mhd_LOG_MSG (d, MHD_SC_LISTEN_DUAL_STACK_NOT_SUITABLE, \ 502 "IP dual stack is not possible for provided sockaddr"); 503 } 504 #ifdef HAVE_INET6 505 else 506 { 507 #ifdef HAVE_DCLR_IPV6_V6ONLY 508 sk_type = mhd_SKT_IP_DUAL_REQUIRED; 509 #else /* ! IPV6_V6ONLY */ 510 mhd_LOG_MSG (d, \ 511 MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_NOT_SUPPORTED, \ 512 "IP dual stack is not supported by this platform or " \ 513 "by this MHD build"); 514 #endif /* ! 
IPV6_V6ONLY */ 515 } 516 #endif /* HAVE_INET6 */ 517 } 518 519 if (NULL == p_use_sa) 520 { 521 #if defined(HAVE_STRUCT_SOCKADDR_SA_LEN) && \ 522 defined(HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN) 523 if ((((size_t) s->bind_sa.v_sa->sa_len) != s->bind_sa.v_sa_len) && 524 (sizeof(sa_all) >= s->bind_sa.v_sa_len)) 525 { 526 /* Fix embedded 'sa_len' member if possible */ 527 memcpy (&sa_all, s->bind_sa.v_sa, s->bind_sa.v_sa_len); 528 mhd_assert (s->bind_sa.v_sa_len == (uint8_t) s->bind_sa.v_sa_len); 529 sa_all.sa_stor.ss_len = (uint8_t) s->bind_sa.v_sa_len; 530 p_use_sa = (const struct sockaddr *) &(sa_all.sa_stor); 531 } 532 else 533 #endif /* HAVE_STRUCT_SOCKADDR_SA_LEN && HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN */ 534 p_use_sa = s->bind_sa.v_sa; 535 use_sa_size = (socklen_t) s->bind_sa.v_sa_len; 536 } 537 } 538 else /* if (MHD_AF_NONE != s->bind_port.v_af) */ 539 { 540 /* No options conflicts */ 541 switch (s->bind_port.v_af) 542 { 543 case MHD_AF_NONE: 544 mhd_assert (0); 545 mhd_UNREACHABLE (); 546 return MHD_SC_INTERNAL_ERROR; 547 case MHD_AF_AUTO: 548 #ifdef HAVE_INET6 549 #ifdef HAVE_DCLR_IPV6_V6ONLY 550 if (force_v6_any_dual) 551 sk_type = mhd_SKT_IP_V6_WITH_V4_OPT; 552 else if (v6_tried) 553 sk_type = mhd_SKT_IP_V4_WITH_FALLBACK; 554 else 555 sk_type = mhd_SKT_IP_V4_WITH_V6_OPT; 556 #else /* ! IPV6_V6ONLY */ 557 mhd_assert (! v6_tried); 558 if (force_v6_any_dual) 559 sk_type = mhd_SKT_IP_V6_ONLY; 560 else 561 sk_type = mhd_SKT_IP_V4_WITH_FALLBACK; 562 #endif /* ! IPV6_V6ONLY */ 563 #else /* ! HAVE_INET6 */ 564 sk_type = mhd_SKT_IP_V4_ONLY; 565 #endif /* ! HAVE_INET6 */ 566 break; 567 case MHD_AF_INET4: 568 mhd_assert (! v6_tried); 569 mhd_assert (! force_v6_any_dual); 570 sk_type = mhd_SKT_IP_V4_ONLY; 571 break; 572 case MHD_AF_INET6: 573 mhd_assert (! v6_tried); 574 mhd_assert (! force_v6_any_dual); 575 #ifdef HAVE_INET6 576 sk_type = mhd_SKT_IP_V6_ONLY; 577 #else /* ! 
HAVE_INET6 */ 578 mhd_LOG_MSG (d, MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD, \ 579 "IPv6 is not supported by this MHD build or " \ 580 "by this platform"); 581 return MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD; 582 #endif /* ! HAVE_INET6 */ 583 break; 584 case MHD_AF_DUAL: 585 mhd_assert (! v6_tried); 586 mhd_assert (! force_v6_any_dual); 587 #ifdef HAVE_INET6 588 #ifdef HAVE_DCLR_IPV6_V6ONLY 589 sk_type = mhd_SKT_IP_DUAL_REQUIRED; 590 #else /* ! IPV6_V6ONLY */ 591 mhd_LOG_MSG (d, 592 MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_NOT_SUPPORTED, \ 593 "IP dual stack is not supported by this platform or " \ 594 "by this MHD build"); 595 sk_type = mhd_SKT_IP_V6_ONLY; 596 #endif /* ! IPV6_V6ONLY */ 597 #else /* ! HAVE_INET6 */ 598 mhd_LOG_MSG (d, MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD, \ 599 "IPv6 is not supported by this MHD build or " \ 600 "by this platform"); 601 return MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD; 602 #endif /* ! HAVE_INET6 */ 603 break; 604 case MHD_AF_DUAL_v4_OPTIONAL: 605 mhd_assert (! v6_tried); 606 mhd_assert (! force_v6_any_dual); 607 #ifdef HAVE_INET6 608 #ifdef HAVE_DCLR_IPV6_V6ONLY 609 sk_type = mhd_SKT_IP_V6_WITH_V4_OPT; 610 #else /* ! IPV6_V6ONLY */ 611 sk_type = mhd_SKT_IP_V6_ONLY; 612 #endif /* ! IPV6_V6ONLY */ 613 #else /* ! HAVE_INET6 */ 614 mhd_LOG_MSG (d, MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD, \ 615 "IPv6 is not supported by this MHD build or " \ 616 "by this platform"); 617 return MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD; 618 #endif /* ! HAVE_INET6 */ 619 break; 620 case MHD_AF_DUAL_v6_OPTIONAL: 621 mhd_assert (! force_v6_any_dual); 622 #ifdef HAVE_INET6 623 #ifdef HAVE_DCLR_IPV6_V6ONLY 624 sk_type = (! v6_tried) ? 625 mhd_SKT_IP_V4_WITH_V6_OPT : mhd_SKT_IP_V4_ONLY; 626 #else /* ! IPV6_V6ONLY */ 627 mhd_assert (! v6_tried); 628 sk_type = mhd_SKT_IP_V4_ONLY; 629 #endif /* ! IPV6_V6ONLY */ 630 #else /* ! HAVE_INET6 */ 631 mhd_assert (! v6_tried); 632 sk_type = mhd_SKT_IP_V4_ONLY; 633 #endif /* ! 
HAVE_INET6 */ 634 break; 635 default: 636 mhd_LOG_MSG (d, MHD_SC_AF_NOT_SUPPORTED_BY_BUILD, \ 637 "Unknown address family specified"); 638 return MHD_SC_AF_NOT_SUPPORTED_BY_BUILD; 639 } 640 641 mhd_assert (mhd_SKT_NO_SOCKET < sk_type); 642 643 switch (sk_type) 644 { 645 case mhd_SKT_IP_V4_ONLY: 646 case mhd_SKT_IP_V4_WITH_FALLBACK: 647 /* Zeroing is not required, but may help on exotic platforms */ 648 memset (&(sa_all.sa_i4), 0, sizeof(sa_all.sa_i4)); 649 sa_all.sa_i4.sin_family = AF_INET; 650 sa_all.sa_i4.sin_port = htons (s->bind_port.v_port); 651 sa_all.sa_i4.sin_addr.s_addr = INADDR_ANY; 652 if (0 != INADDR_ANY) /* Optimised at compile time */ 653 sa_all.sa_i4.sin_addr.s_addr = htonl (INADDR_ANY); 654 #ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN 655 sa_all.sa_i4.sin_len = (uint8_t) sizeof (sa_all.sa_i4); 656 #endif 657 p_use_sa = (const struct sockaddr *) &(sa_all.sa_i4); 658 use_sa_size = (socklen_t) sizeof (sa_all.sa_i4); 659 break; 660 case mhd_SKT_IP_V6_ONLY: 661 case mhd_SKT_IP_DUAL_REQUIRED: 662 case mhd_SKT_IP_V4_WITH_V6_OPT: 663 case mhd_SKT_IP_V6_WITH_V4_OPT: 664 #ifdef HAVE_INET6 665 if (1) 666 { 667 #ifdef IN6ADDR_ANY_INIT 668 static const struct in6_addr static_in6any = IN6ADDR_ANY_INIT; 669 #endif 670 /* Zeroing is required by POSIX */ 671 memset (&(sa_all.sa_i6), 0, sizeof(sa_all.sa_i6)); 672 sa_all.sa_i6.sin6_family = AF_INET6; 673 sa_all.sa_i6.sin6_port = htons (s->bind_port.v_port); 674 #ifdef IN6ADDR_ANY_INIT /* Optional assignment at the address is all zeros anyway */ 675 sa_all.sa_i6.sin6_addr = static_in6any; 676 #endif 677 #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN 678 sa_all.sa_i6.sin6_len = (uint8_t) sizeof (sa_all.sa_i6); 679 #endif 680 p_use_sa = (const struct sockaddr *) &(sa_all.sa_i6); 681 use_sa_size = (socklen_t) sizeof (sa_all.sa_i6); 682 } 683 break; 684 #endif /* HAVE_INET6 */ 685 case mhd_SKT_UNKNOWN: 686 case mhd_SKT_NON_IP: 687 case mhd_SKT_UNIX: 688 case mhd_SKT_NO_SOCKET: 689 default: 690 mhd_UNREACHABLE (); 691 return 
MHD_SC_INTERNAL_ERROR; 692 } 693 694 sk_port = s->bind_port.v_port; 695 696 } 697 } 698 else 699 { 700 /* No listen socket */ 701 d->net.listen.fd = MHD_INVALID_SOCKET; 702 d->net.listen.is_broken = false; 703 d->net.listen.type = mhd_SOCKET_TYPE_UNKNOWN; 704 d->net.listen.non_block = false; 705 d->net.listen.port = 0; 706 707 return MHD_SC_OK; 708 } 709 710 mhd_assert (mhd_SKT_NO_SOCKET != sk_type); 711 mhd_assert ((NULL != p_use_sa) || sk_already_listening); 712 mhd_assert ((MHD_INVALID_SOCKET == sk) || sk_already_listening); 713 714 if (MHD_INVALID_SOCKET == sk) 715 { 716 mhd_assert (NULL != p_use_sa); 717 #if defined(MHD_SOCKETS_KIND_WINSOCK) && defined(WSA_FLAG_NO_HANDLE_INHERIT) 718 /* May fail before Win7 SP1 */ 719 sk = WSASocketW (p_use_sa->sa_family, SOCK_STREAM, 0, 720 NULL, 0, WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT); 721 722 if (MHD_INVALID_SOCKET == sk) 723 #endif /* MHD_SOCKETS_KIND_WINSOCK && WSA_FLAG_NO_HANDLE_INHERIT */ 724 sk = socket (p_use_sa->sa_family, 725 SOCK_STREAM | mhd_SOCK_NONBLOCK 726 | mhd_SOCK_CLOEXEC | mhd_SOCK_NOSIGPIPE, 0); 727 728 if (MHD_INVALID_SOCKET == sk) 729 { 730 #ifdef HAVE_INET6 731 if (mhd_SKT_IP_V4_WITH_FALLBACK == sk_type) 732 return create_bind_listen_stream_socket_inner (d, 733 s, 734 v6_tried, 735 true, 736 prev_bnd_lstn_err); 737 if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) 738 return create_bind_listen_stream_socket_inner (d, 739 s, 740 true, 741 false, 742 prev_bnd_lstn_err); 743 #endif /* HAVE_INET6 */ 744 745 if (MHD_SC_OK != prev_bnd_lstn_err) 746 return prev_bnd_lstn_err; 747 748 if (mhd_SCKT_LERR_IS_AF ()) 749 { 750 mhd_LOG_MSG (d, MHD_SC_AF_NOT_AVAILABLE, \ 751 "The requested socket address family is rejected " \ 752 "by the OS"); 753 return MHD_SC_AF_NOT_AVAILABLE; 754 } 755 mhd_LOG_MSG (d, MHD_SC_FAILED_TO_OPEN_LISTEN_SOCKET, \ 756 "Failed to open listen socket"); 757 758 return MHD_SC_FAILED_TO_OPEN_LISTEN_SOCKET; 759 } 760 is_non_block = (0 != mhd_SOCK_NONBLOCK); 761 is_non_inhr = (0 != 
mhd_SOCK_CLOEXEC); 762 } 763 else 764 { 765 is_non_block = false; /* Try to set non-block */ 766 is_non_inhr = false; /* Try to set non-inheritable */ 767 } 768 769 /* The listen socket must be closed if error code returned 770 beyond this point */ 771 772 ret = MHD_SC_OK; 773 774 do 775 { /* The scope for automatic socket close for error returns */ 776 if (! mhd_FD_FITS_DAEMON (d,sk)) 777 { 778 mhd_LOG_MSG (d, MHD_SC_LISTEN_FD_OUTSIDE_OF_SET_RANGE, \ 779 "The listen FD value is higher than allowed"); 780 ret = MHD_SC_LISTEN_FD_OUTSIDE_OF_SET_RANGE; 781 break; 782 } 783 784 if (! is_non_inhr) 785 { 786 if (! mhd_socket_noninheritable (sk)) 787 mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_NOINHERIT_FAILED, \ 788 "OS refused to make the listen socket non-inheritable"); 789 } 790 791 if (! sk_already_listening) 792 { 793 #ifdef HAVE_INET6 794 #ifdef HAVE_DCLR_IPV6_V6ONLY 795 if ((mhd_SKT_IP_V6_ONLY == sk_type) || 796 (mhd_SKT_IP_DUAL_REQUIRED == sk_type) || 797 (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) || 798 (mhd_SKT_IP_V6_WITH_V4_OPT == sk_type) || 799 (mhd_SKT_UNKNOWN == sk_type)) 800 { 801 mhd_SCKT_OPT_BOOL no_dual_to_set; 802 bool use_dual; 803 804 use_dual = ((mhd_SKT_IP_DUAL_REQUIRED == sk_type) || 805 (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) || 806 (mhd_SKT_IP_V6_WITH_V4_OPT == sk_type)); 807 no_dual_to_set = use_dual ? 0 : 1; 808 809 if (0 != mhd_setsockopt (sk, IPPROTO_IPV6, IPV6_V6ONLY, 810 (void *) &no_dual_to_set, 811 sizeof (no_dual_to_set))) 812 { 813 mhd_SCKT_OPT_BOOL no_dual_current; 814 socklen_t opt_size; 815 bool state_unknown; 816 bool state_match; 817 818 no_dual_current = 0; 819 opt_size = sizeof(no_dual_current); 820 821 /* Some platforms forbid setting this options, but allow 822 reading. 
*/ 823 if ((0 != mhd_getsockopt (sk, IPPROTO_IPV6, IPV6_V6ONLY, 824 (void*) &no_dual_current, &opt_size)) 825 || (((socklen_t) sizeof(no_dual_current)) < opt_size)) 826 { 827 state_unknown = true; 828 state_match = false; 829 } 830 else 831 { 832 state_unknown = false; 833 state_match = ((! ! no_dual_current) == (! ! no_dual_to_set)); 834 } 835 836 if (state_unknown || ! state_match) 837 { 838 if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) 839 { 840 (void) mhd_socket_close (sk); 841 return create_bind_listen_stream_socket_inner (d, 842 s, 843 true, 844 false, 845 prev_bnd_lstn_err); 846 } 847 if (! state_unknown) 848 { 849 /* The dual-stack state is definitely wrong */ 850 if (mhd_SKT_IP_V6_ONLY == sk_type) 851 { 852 mhd_LOG_MSG ( \ 853 d, MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED, \ 854 "Failed to disable IP dual-stack configuration " \ 855 "for the listen socket"); 856 ret = (MHD_SC_OK == prev_bnd_lstn_err) ? 857 MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED : 858 prev_bnd_lstn_err; 859 break; 860 } 861 else if (mhd_SKT_UNKNOWN != sk_type) 862 { 863 mhd_LOG_MSG ( \ 864 d, MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED, \ 865 "Cannot enable IP dual-stack configuration " \ 866 "for the listen socket"); 867 if (mhd_SKT_IP_DUAL_REQUIRED == sk_type) 868 { 869 ret = (MHD_SC_OK == prev_bnd_lstn_err) ? 870 MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED : 871 prev_bnd_lstn_err; 872 break; 873 } 874 } 875 } 876 else 877 { 878 /* The dual-stack state is unknown */ 879 if (mhd_SKT_UNKNOWN != sk_type) 880 mhd_LOG_MSG ( 881 d, MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_UNKNOWN, \ 882 "Failed to set dual-stack (IPV6_ONLY) configuration " \ 883 "for the listen socket, using system defaults"); 884 } 885 } 886 } 887 } 888 #else /* ! IPV6_V6ONLY */ 889 mhd_assert (mhd_SKT_IP_DUAL_REQUIRED != sk_type); 890 mhd_assert (mhd_SKT_IP_V4_WITH_V6_OPT != sk_type); 891 mhd_assert (mhd_SKT_IP_V6_WITH_V4_OPT != sk_type); 892 #endif /* ! 
IPV6_V6ONLY */ 893 #endif /* HAVE_INET6 */ 894 895 if (MHD_FOM_AUTO <= d->settings->tcp_fastopen.v_option) 896 { 897 #if defined(HAVE_DCLR_TCP_FASTOPEN) 898 int fo_param; 899 #ifdef __linux__ 900 /* The parameter is the queue length */ 901 fo_param = (int) d->settings->tcp_fastopen.v_queue_length; 902 if (0 == fo_param) 903 fo_param = MHD_TCP_FASTOPEN_DEF_QUEUE_LEN; 904 #else /* ! __linux__ */ 905 fo_param = 1; /* The parameter is on/off type of setting */ 906 #endif /* ! __linux__ */ 907 if (0 != mhd_setsockopt (sk, IPPROTO_TCP, TCP_FASTOPEN, 908 (const void *) &fo_param, 909 sizeof (fo_param))) 910 { 911 mhd_LOG_MSG (d, MHD_SC_LISTEN_FAST_OPEN_FAILURE, \ 912 "OS refused to enable TCP Fast Open on " \ 913 "the listen socket"); 914 if (MHD_FOM_AUTO < d->settings->tcp_fastopen.v_option) 915 { 916 ret = MHD_SC_LISTEN_FAST_OPEN_FAILURE; 917 break; 918 } 919 } 920 #else /* ! TCP_FASTOPEN */ 921 if (MHD_FOM_AUTO < d->settings->tcp_fastopen.v_option) 922 { 923 mhd_LOG_MSG (d, MHD_SC_LISTEN_FAST_OPEN_FAILURE, \ 924 "The OS does not support TCP Fast Open"); 925 ret = MHD_SC_LISTEN_FAST_OPEN_FAILURE; 926 break; 927 } 928 #endif 929 } 930 931 if (MHD_D_OPTION_BIND_TYPE_NOT_SHARED >= d->settings->listen_addr_reuse) 932 { 933 #ifndef MHD_SOCKETS_KIND_WINSOCK 934 #ifdef HAVE_DCLR_SO_REUSEADDR 935 mhd_SCKT_OPT_BOOL on_val1 = 1; 936 if (0 != mhd_setsockopt (sk, SOL_SOCKET, SO_REUSEADDR, 937 (const void *) &on_val1, sizeof (on_val1))) 938 { 939 mhd_LOG_MSG (d, MHD_SC_LISTEN_PORT_REUSE_ENABLE_FAILED, \ 940 "OS refused to enable address reuse on " \ 941 "the listen socket"); 942 } 943 #else /* ! SO_REUSEADDR */ 944 mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_NOT_SUPPORTED, \ 945 "The OS does not support address reuse for sockets"); 946 #endif /* ! SO_REUSEADDR */ 947 #endif /* ! 
MHD_SOCKETS_KIND_WINSOCK */ 948 if (MHD_D_OPTION_BIND_TYPE_NOT_SHARED > d->settings->listen_addr_reuse) 949 { 950 #if defined(HAVE_DCLR_SO_REUSEPORT) || defined(MHD_SOCKETS_KIND_WINSOCK) 951 int opt_name; 952 mhd_SCKT_OPT_BOOL on_val2 = 1; 953 #ifndef MHD_SOCKETS_KIND_WINSOCK 954 opt_name = SO_REUSEPORT; 955 #else /* ! MHD_SOCKETS_KIND_WINSOCK */ 956 opt_name = SO_REUSEADDR; /* On W32 it is the same as SO_REUSEPORT on other platforms */ 957 #endif /* ! MHD_SOCKETS_KIND_WINSOCK */ 958 if (0 != mhd_setsockopt (sk, \ 959 SOL_SOCKET, \ 960 opt_name, \ 961 (const void *) &on_val2, \ 962 sizeof (on_val2))) 963 { 964 mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_FAILED, \ 965 "OS refused to enable address sharing " \ 966 "on the listen socket"); 967 ret = MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_FAILED; 968 break; 969 } 970 #else /* ! SO_REUSEADDR && ! MHD_SOCKETS_KIND_WINSOCK */ 971 mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_NOT_SUPPORTED, \ 972 "The OS does not support address sharing for sockets"); 973 ret = MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_NOT_SUPPORTED; 974 break; 975 #endif /* ! SO_REUSEADDR && ! 
MHD_SOCKETS_KIND_WINSOCK */ 976 } 977 } 978 #if defined(SO_EXCLUSIVEADDRUSE) || defined(SO_EXCLBIND) 979 else if (MHD_D_OPTION_BIND_TYPE_EXCLUSIVE <= 980 d->settings->listen_addr_reuse) 981 { 982 int opt_name; 983 mhd_SCKT_OPT_BOOL on_val = 1; 984 #ifdef SO_EXCLUSIVEADDRUSE 985 opt_name = SO_EXCLUSIVEADDRUSE; 986 #else 987 opt_name = SO_EXCLBIND; 988 #endif 989 if (0 != mhd_setsockopt (sk, \ 990 SOL_SOCKET, \ 991 opt_name, \ 992 (const void *) &on_val, \ 993 sizeof (on_val))) 994 { 995 mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_EXCLUSIVE_ENABLE_FAILED, \ 996 "OS refused to enable exclusive address use " \ 997 "on the listen socket"); 998 ret = MHD_SC_LISTEN_ADDRESS_EXCLUSIVE_ENABLE_FAILED; 999 break; 1000 } 1001 } 1002 #endif /* SO_EXCLUSIVEADDRUSE || SO_EXCLBIND */ 1003 1004 mhd_assert (NULL != p_use_sa); 1005 mhd_assert (0 != use_sa_size); 1006 if (0 != bind (sk, p_use_sa, use_sa_size)) 1007 { 1008 ret = (MHD_SC_OK == prev_bnd_lstn_err) ? 1009 MHD_SC_LISTEN_SOCKET_BIND_FAILED : prev_bnd_lstn_err; 1010 #ifdef HAVE_INET6 1011 if (mhd_SKT_IP_V4_WITH_FALLBACK == sk_type) 1012 { 1013 (void) mhd_socket_close (sk); 1014 return create_bind_listen_stream_socket_inner (d, 1015 s, 1016 v6_tried, 1017 true, 1018 ret); 1019 } 1020 if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) 1021 { 1022 (void) mhd_socket_close (sk); 1023 return create_bind_listen_stream_socket_inner (d, 1024 s, 1025 true, 1026 false, 1027 ret); 1028 } 1029 #endif /* HAVE_INET6 */ 1030 break; 1031 } 1032 1033 if (1) 1034 { 1035 int accept_queue_len; 1036 accept_queue_len = (int) s->listen_backlog; 1037 if (0 > accept_queue_len) 1038 accept_queue_len = 0; 1039 if (0 == accept_queue_len) 1040 { 1041 #if defined(SOMAXCONN) || defined(HAVE_DCLR_SOMAXCONN) 1042 accept_queue_len = SOMAXCONN; 1043 #else /* ! SOMAXCONN */ 1044 accept_queue_len = 127; /* Should be the safe value */ 1045 #endif /* ! 
SOMAXCONN */ 1046 } 1047 if (0 != listen (sk, accept_queue_len)) 1048 { 1049 ret = MHD_SC_LISTEN_FAILURE; 1050 #ifdef HAVE_INET6 1051 if (mhd_SKT_IP_V4_WITH_FALLBACK == sk_type) 1052 { 1053 (void) mhd_socket_close (sk); 1054 return create_bind_listen_stream_socket_inner (d, 1055 s, 1056 v6_tried, 1057 true, 1058 ret); 1059 } 1060 if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) 1061 { 1062 (void) mhd_socket_close (sk); 1063 return create_bind_listen_stream_socket_inner (d, 1064 s, 1065 true, 1066 false, 1067 ret); 1068 } 1069 #endif /* HAVE_INET6 */ 1070 break; 1071 } 1072 } 1073 } 1074 /* A valid listening socket is ready here */ 1075 1076 if (! is_non_block) 1077 { 1078 is_non_block = mhd_socket_nonblocking (sk); 1079 if (! is_non_block) 1080 mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_NONBLOCKING_FAILURE, \ 1081 "OS refused to make the listen socket non-blocking"); 1082 } 1083 1084 /* Set to the daemon only when the listening socket is fully ready */ 1085 d->net.listen.fd = sk; 1086 d->net.listen.is_broken = false; 1087 switch (sk_type) 1088 { 1089 case mhd_SKT_UNKNOWN: 1090 d->net.listen.type = mhd_SOCKET_TYPE_UNKNOWN; 1091 break; 1092 case mhd_SKT_NON_IP: 1093 d->net.listen.type = mhd_SOCKET_TYPE_NON_IP; 1094 break; 1095 case mhd_SKT_UNIX: 1096 d->net.listen.type = mhd_SOCKET_TYPE_UNIX; 1097 break; 1098 case mhd_SKT_IP_V4_ONLY: 1099 case mhd_SKT_IP_V6_ONLY: 1100 case mhd_SKT_IP_DUAL_REQUIRED: 1101 case mhd_SKT_IP_V4_WITH_V6_OPT: 1102 case mhd_SKT_IP_V6_WITH_V4_OPT: 1103 case mhd_SKT_IP_V4_WITH_FALLBACK: 1104 d->net.listen.type = mhd_SOCKET_TYPE_IP; 1105 break; 1106 case mhd_SKT_NO_SOCKET: 1107 default: 1108 mhd_UNREACHABLE (); 1109 return MHD_SC_INTERNAL_ERROR; 1110 } 1111 d->net.listen.non_block = is_non_block; 1112 d->net.listen.port = sk_port; 1113 1114 mhd_assert (ret == MHD_SC_OK); 1115 1116 return MHD_SC_OK; 1117 1118 } while (0); 1119 1120 mhd_assert (MHD_SC_OK != ret); /* This should be only error returns here */ 1121 mhd_assert (MHD_INVALID_SOCKET != sk); 1122 
(void) mhd_socket_close (sk);
  return ret;
}


/**
 * Create socket, bind to the address and start listening on the socket.
 *
 * The socket is assigned to the daemon as listening FD.
 *
 * @param d the daemon to use
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise (no error printed to log if result is
 *         #MHD_SC_LISTEN_SOCKET_BIND_FAILED or #MHD_SC_LISTEN_FAILURE)
 */
static enum MHD_StatusCode
create_bind_listen_stream_socket (struct MHD_Daemon *restrict d,
                                  struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode ret;

  /* The "inner" function may recurse to retry with IPv4 fallback;
     bind/listen failures are logged here (once) instead of inside it */
  ret = create_bind_listen_stream_socket_inner (d,
                                                s,
                                                false,
                                                false,
                                                MHD_SC_OK);
#ifdef MHD_SUPPORT_LOG_FUNCTIONALITY
  if (MHD_SC_LISTEN_SOCKET_BIND_FAILED == ret)
    mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_BIND_FAILED, \
                 "Failed to bind the listen socket");
  else if (MHD_SC_LISTEN_FAILURE == ret)
    mhd_LOG_MSG (d, MHD_SC_LISTEN_FAILURE, \
                 "Failed to start listening on the listen socket");
#endif /* MHD_SUPPORT_LOG_FUNCTIONALITY */

  return ret;
}


#ifdef MHD_USE_GETSOCKNAME
/**
 * Detect and set the type and port of the listening socket
 * @param d the daemon to use
 */
static MHD_FN_PAR_NONNULL_ (1) void
detect_listen_type_and_port (struct MHD_Daemon *restrict d)
{
  union mhd_SockaddrAny sa_all;
  socklen_t sa_size;
  enum mhd_SocketType declared_type;

  mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
  mhd_assert (0 == d->net.listen.port);
  memset (&sa_all, 0, sizeof(sa_all)); /* Actually not required */
  sa_size = (socklen_t) sizeof(sa_all);

  if (0 != getsockname (d->net.listen.fd, &(sa_all.sa), &sa_size))
  {
    /* Detection failure is only worth reporting for IP sockets,
       where the port number is meaningful */
    if (mhd_SOCKET_TYPE_IP == d->net.listen.type)
      mhd_LOG_MSG (d, MHD_SC_LISTEN_PORT_DETECT_FAILURE, \
                   "Failed to detect the port number on the listening socket");
    return;
  }

  declared_type = d->net.listen.type;
  if (0 == sa_size)
  {
#ifndef __linux__
    /* Zero-length address is used on some non-Linux platforms
       for UNIX domain sockets */
    d->net.listen.type = mhd_SOCKET_TYPE_UNIX;
    d->net.listen.port = 0;
#else  /* ! __linux__ */
    (void) 0;
#endif /* ! __linux__ */
  }
  else
  {
    switch (sa_all.sa.sa_family)
    {
    case AF_INET:
      d->net.listen.type = mhd_SOCKET_TYPE_IP;
      d->net.listen.port = (uint_least16_t) ntohs (sa_all.sa_i4.sin_port);
      break;
#ifdef HAVE_INET6
    case AF_INET6:
      d->net.listen.type = mhd_SOCKET_TYPE_IP;
      d->net.listen.port = (uint_least16_t) ntohs (sa_all.sa_i6.sin6_port);
      break;
#endif /* HAVE_INET6 */
#ifdef MHD_AF_UNIX
    case MHD_AF_UNIX:
      d->net.listen.type = mhd_SOCKET_TYPE_UNIX;
      d->net.listen.port = 0;
      break;
#endif /* MHD_AF_UNIX */
    default:
      d->net.listen.type = mhd_SOCKET_TYPE_UNKNOWN;
      d->net.listen.port = 0;
      break;
    }
  }

  /* Warn if the socket was created as IP, but the OS reports another family */
  if ((declared_type != d->net.listen.type)
      && (mhd_SOCKET_TYPE_IP == declared_type))
    mhd_LOG_MSG (d, MHD_SC_UNEXPECTED_SOCKET_ERROR, \
                 "The type of listen socket is detected as non-IP, while " \
                 "the socket has been created as an IP socket");
}


#else
# define detect_listen_type_and_port(d) ((void) d)
#endif


#ifdef MHD_SUPPORT_EPOLL

/**
 * Initialise daemon's epoll FD.
 * This could be performed early to probe for epoll FD presence
 * or, normally, during worker initialisation.
 * @param d the daemon object
 * @param early_probing 'true' if this is early epoll probing
 * @param log_failures if 'false', failure logging is suppressed
 *        (used when fallback polling options exist); at least one of
 *        @a early_probing / @a log_failures must be 'true'
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
init_epoll_fd (struct
MHD_Daemon *restrict d,
               bool early_probing,
               bool log_failures)
{
  int e_fd;
  mhd_ASSUME (early_probing || log_failures);
  mhd_assert ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) ||
              (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type));
  mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
  mhd_assert (early_probing || d->dbg.net_inited);
  mhd_assert (! early_probing || ! d->dbg.net_inited);
  mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \
              (NULL == d->events.data.epoll.events));

  if (! early_probing)
  {
    /* Full events initialisation: prefer re-using the epoll FD that was
       created during early probing instead of creating a new one */
    mhd_ASSUME (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
    if (! mhd_D_HAS_MASTER (d))
    {
      mhd_assert (0 < d->events.data.epoll.early_fd);
      /* Move early initialised epoll FD */
      d->events.data.epoll.e_fd = d->events.data.epoll.early_fd;
      d->events.data.epoll.early_fd = MHD_INVALID_SOCKET;
      return MHD_SC_OK;
    }
#ifdef MHD_SUPPORT_THREADS
    else
    {
      /* Worker daemon: the early FD lives on the master daemon and only
         the first worker can take it over */
      int early_fd;

      early_fd = d->threading.hier.master->events.data.epoll.early_fd;

      /* Move early initialised epoll FD if it is not yet taken */
      if (MHD_INVALID_SOCKET != early_fd)
      {
        d->events.data.epoll.e_fd = early_fd;
        d->threading.hier.master->events.data.epoll.early_fd =
          MHD_INVALID_SOCKET;
        return MHD_SC_OK;
      }
      /* Process with new epoll FD creation */
    }
#endif /* MHD_SUPPORT_THREADS */
  }
  else
    mhd_ASSUME (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type);

#ifdef HAVE_EPOLL_CREATE1
  e_fd = epoll_create1 (EPOLL_CLOEXEC);
#else  /* ! HAVE_EPOLL_CREATE1 */
  e_fd = epoll_create (128); /* The number is usually ignored */
  if (0 <= e_fd)
  {
    /* epoll_create() has no CLOEXEC flag; set it manually (best effort) */
    if (! mhd_socket_noninheritable (e_fd))
      mhd_LOG_MSG (d, MHD_SC_EPOLL_CTL_CONFIGURE_NOINHERIT_FAILED, \
                   "Failed to make epoll control FD non-inheritable");
  }
#endif /* ! HAVE_EPOLL_CREATE1 */
  if (0 > e_fd)
  {
    if (log_failures)
      mhd_LOG_MSG (d, MHD_SC_EPOLL_CTL_CREATE_FAILED, \
                   "Failed to create epoll control FD");
    return MHD_SC_EPOLL_CTL_CREATE_FAILED; /* Failure exit point */
  }

  if (! mhd_FD_FITS_DAEMON (d, e_fd))
  {
    if (log_failures)
      mhd_LOG_MSG (d, MHD_SC_EPOLL_CTL_OUTSIDE_OF_SET_RANGE, \
                   "The epoll control FD value is higher than allowed");
    (void) close (e_fd);
    return MHD_SC_EPOLL_CTL_OUTSIDE_OF_SET_RANGE; /* Failure exit point */
  }

  /* Needs to be set here as setting epoll data member 'early_fd' */
  d->events.poll_type = mhd_POLL_TYPE_EPOLL;
  if (! early_probing)
    d->events.data.epoll.e_fd = e_fd;
  else
    d->events.data.epoll.early_fd = e_fd;

  return MHD_SC_OK; /* Success exit point */
}


/**
 * Deinitialise daemon's epoll FD
 * @param d the daemon object
 */
MHD_FN_PAR_NONNULL_ (1) static void
deinit_epoll_fd (struct MHD_Daemon *restrict d)
{
  mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
  mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd);
  mhd_assert (MHD_INVALID_SOCKET == d->events.data.epoll.early_fd);
  close (d->events.data.epoll.e_fd);
}


#endif /* MHD_SUPPORT_EPOLL */


/**
 * Choose sockets monitoring syscall and pre-initialise it
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \
  MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_choose_and_preinit_events (struct MHD_Daemon *restrict d,
                                  struct DaemonOptions *restrict s)
{
  enum
mhd_IntPollType chosen_type;

  mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type);

  /* NOTE(review): this assertion looks vacuous: the first two disjuncts
     cannot both be false at the same time, so the whole condition is
     always 'true'.  Presumably '&&' between the first two terms was
     intended — verify against the work-mode invariants. */
  mhd_assert ((mhd_WM_INT_EXTERNAL_EVENTS_EDGE != d->wmode_int) || \
              (mhd_WM_INT_EXTERNAL_EVENTS_LEVEL != d->wmode_int) || \
              (MHD_SPS_AUTO == s->poll_syscall));

  /* Check whether the provided parameter is in the range of expected values.
     Reject unsupported or disabled values. */
  switch (s->poll_syscall)
  {
  case MHD_SPS_AUTO:
    chosen_type = mhd_POLL_TYPE_NOT_SET_YET;
    break;
  case MHD_SPS_SELECT:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
#ifndef MHD_SUPPORT_SELECT
    mhd_LOG_MSG (d, MHD_SC_SELECT_SYSCALL_NOT_AVAILABLE, \
                 "'select()' is not supported by the platform or " \
                 "this MHD build");
    return MHD_SC_SELECT_SYSCALL_NOT_AVAILABLE;
#else  /* MHD_SUPPORT_SELECT */
    chosen_type = mhd_POLL_TYPE_SELECT;
#endif /* MHD_SUPPORT_SELECT */
    break;
  case MHD_SPS_POLL:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
#ifndef MHD_SUPPORT_POLL
    mhd_LOG_MSG (d, MHD_SC_POLL_SYSCALL_NOT_AVAILABLE, \
                 "'poll()' is not supported by the platform or " \
                 "this MHD build");
    return MHD_SC_POLL_SYSCALL_NOT_AVAILABLE;
#else  /* MHD_SUPPORT_POLL */
    chosen_type = mhd_POLL_TYPE_POLL;
#endif /* MHD_SUPPORT_POLL */
    break;
  case MHD_SPS_EPOLL:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
#ifndef MHD_SUPPORT_EPOLL
    mhd_LOG_MSG (d, MHD_SC_EPOLL_SYSCALL_NOT_AVAILABLE, \
                 "'epoll' is not supported by the platform or " \
                 "this MHD build");
    return MHD_SC_EPOLL_SYSCALL_NOT_AVAILABLE;
#else  /* MHD_SUPPORT_EPOLL */
    chosen_type = mhd_POLL_TYPE_EPOLL;
#endif /* MHD_SUPPORT_EPOLL */
    break;
  case MHD_SPS_KQUEUE:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
#ifndef MHD_SUPPORT_KQUEUE
    mhd_LOG_MSG (d, MHD_SC_KQUEUE_SYSCALL_NOT_AVAILABLE, \
                 "'kqueue' is not supported by the platform or " \
                 "this MHD build");
    return MHD_SC_KQUEUE_SYSCALL_NOT_AVAILABLE;
#else  /* MHD_SUPPORT_KQUEUE */
    chosen_type = mhd_POLL_TYPE_KQUEUE;
#endif /* MHD_SUPPORT_KQUEUE */
    break;
  default:
    mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_UNEXPECTED_SPS,
                 "Wrong socket polling syscall specified");
    return MHD_SC_CONFIGURATION_UNEXPECTED_SPS;
  }

#ifdef MHD_SUPPORT_HTTPS
  /* Explicitly requested edge-triggered polling must be compatible with
     the requested TLS backend */
  if ((mhd_WM_INT_EXTERNAL_EVENTS_EDGE == (d)->wmode_int) ||
      mhd_POLL_TYPE_INT_IS_EDGE_TRIG (chosen_type))
  { /* Edge-triggered polling chosen */
    if (MHD_TLS_BACKEND_NONE != s->tls)
    {
      if (! mhd_tls_is_edge_trigg_supported (s))
      {
#ifdef MHD_SUPPORT_LOG_FUNCTIONALITY
        if (MHD_TLS_BACKEND_ANY == s->tls)
          mhd_LOG_MSG (d, MHD_SC_TLS_BACKEND_DAEMON_INCOMPATIBLE_SETTINGS, \
                       "Edge-triggered sockets polling cannot be used "
                       "with available TLS backends");
        else
          mhd_LOG_MSG (d, MHD_SC_TLS_BACKEND_DAEMON_INCOMPATIBLE_SETTINGS, \
                       "Edge-triggered sockets polling cannot be used "
                       "with selected TLS backend");
#endif /* MHD_SUPPORT_LOG_FUNCTIONALITY */
        return MHD_SC_TLS_BACKEND_DAEMON_INCOMPATIBLE_SETTINGS;
      }
    }
  }
#endif /* MHD_SUPPORT_HTTPS */

  mhd_assert (mhd_POLL_TYPE_EXT != chosen_type);
  mhd_ASSUME ((mhd_POLL_TYPE_NOT_SET_YET != chosen_type) || \
              (MHD_SPS_AUTO == s->poll_syscall));

  if (mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
  {
    if (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int))
      chosen_type = mhd_POLL_TYPE_EXT;
  }

#if defined(MHD_SUPPORT_EPOLL) || defined(MHD_SUPPORT_KQUEUE)
  /* Try edge-triggered polling */
  if ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
      || mhd_POLL_TYPE_INT_IS_KQUEUE (chosen_type)
      || mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type))
  {
    bool et_allowed;

    et_allowed = true;
    if (mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int))
    {
      mhd_assert (! mhd_POLL_TYPE_INT_IS_KQUEUE (chosen_type));
      mhd_assert (! mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type));
      et_allowed = false;
    }
# ifdef MHD_SUPPORT_HTTPS
    else if (MHD_TLS_BACKEND_NONE != s->tls)
      et_allowed = mhd_tls_is_edge_trigg_supported (s);
# endif /* MHD_SUPPORT_HTTPS */

    if (et_allowed)
    {
      mhd_ASSUME ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
                  || mhd_POLL_TYPE_INT_IS_KQUEUE (chosen_type)
                  || mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type));

# if defined(MHD_SUPPORT_KQUEUE)
      if ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
          || mhd_POLL_TYPE_INT_IS_KQUEUE (chosen_type))
        chosen_type = mhd_POLL_TYPE_KQUEUE; /* No need to perform additional checking here */
# endif /* MHD_SUPPORT_KQUEUE */
# ifdef MHD_SUPPORT_EPOLL
      if ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
          || mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type))
      {
        /* epoll - need to be probed here as it can be disabled in kernel */
        enum MHD_StatusCode epoll_res;
        epoll_res = init_epoll_fd (d,
                                   true,
                                   mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type));
        if (MHD_SC_OK != epoll_res)
        {
          /* Probing failure is fatal only if epoll was requested explicitly */
          if (mhd_POLL_TYPE_INT_IS_EPOLL (chosen_type))
            return epoll_res;
          mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == chosen_type);
        }
        else
          chosen_type = mhd_POLL_TYPE_EPOLL;
      }
# endif /* MHD_SUPPORT_EPOLL */
    }
    else
      mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == chosen_type);
  }
# ifdef MHD_SUPPORT_EPOLL
  mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \
              (0 < d->events.data.epoll.early_fd));
  mhd_assert ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) == \
              (mhd_POLL_TYPE_EPOLL == chosen_type));
# endif /* MHD_SUPPORT_EPOLL */
#endif /* MHD_SUPPORT_EPOLL || MHD_SUPPORT_KQUEUE */

  /* Fall back to level-triggered polling */
  if (mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
  {
#if defined(MHD_SUPPORT_POLL)
    chosen_type = mhd_POLL_TYPE_POLL;
#elif defined(MHD_SUPPORT_SELECT)
    chosen_type = mhd_POLL_TYPE_SELECT;
#else
    mhd_LOG_MSG (d, MHD_SC_FEATURE_DISABLED, \
                 "All suitable internal sockets polling technologies are " \
                 "disabled in this MHD build");
    return MHD_SC_FEATURE_DISABLED;
#endif
  }

  /* Pre-initialise the data for the chosen polling technology */
  switch (chosen_type)
  {
  case mhd_POLL_TYPE_EXT:
    mhd_assert ((MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL == s->work_mode.mode) || \
                (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE == s->work_mode.mode));
    mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    d->events.poll_type = mhd_POLL_TYPE_EXT;
    d->events.data.extr.cb_data.cb =
      s->work_mode.params.v_external_event_loop_cb.reg_cb;
    d->events.data.extr.cb_data.cls =
      s->work_mode.params.v_external_event_loop_cb.reg_cb_cls;
    d->events.data.extr.reg_all = (MHD_NO != s->reregister_all);
#ifdef MHD_SUPPORT_THREADS
    d->events.data.extr.itc_data.app_cntx = NULL;
#endif /* MHD_SUPPORT_THREADS */
    d->events.data.extr.listen_data.app_cntx = NULL;
    break;
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    mhd_assert (MHD_WM_EXTERNAL_SINGLE_FD_WATCH != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode);
    mhd_assert (MHD_NO == s->reregister_all);
    d->events.poll_type = mhd_POLL_TYPE_SELECT;
    d->events.data.select.rfds = NULL; /* Memory allocated during event and threads init */
    d->events.data.select.wfds = NULL; /* Memory allocated during event and threads init */
    d->events.data.select.efds = NULL; /* Memory allocated during event and threads init */
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    mhd_assert (MHD_WM_EXTERNAL_SINGLE_FD_WATCH != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode);
    mhd_assert (MHD_NO == s->reregister_all);
    d->events.poll_type = mhd_POLL_TYPE_POLL;
    d->events.data.poll.fds = NULL; /* Memory allocated during event and threads init */
    d->events.data.poll.rel = NULL; /* Memory allocated during event and threads init */
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode);
    mhd_assert (MHD_NO == s->reregister_all);
    /* Pre-initialised by init_epoll_fd() */
    mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
    mhd_assert (0 <= d->events.data.epoll.early_fd);
    d->events.data.epoll.events = NULL; /* Memory allocated during event and threads init */
    d->events.data.epoll.num_elements = 0;
    break;
#endif /* MHD_SUPPORT_EPOLL */
#ifdef MHD_SUPPORT_KQUEUE
  case mhd_POLL_TYPE_KQUEUE:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode);
    mhd_assert (MHD_NO == s->reregister_all);
    d->events.poll_type = mhd_POLL_TYPE_KQUEUE;
    d->events.data.kq.kq_fd = -1;
    d->events.data.kq.kes = NULL;
    d->events.data.kq.num_elements = 0u;
    break;
#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    return MHD_SC_INTERNAL_ERROR;
  }
  return MHD_SC_OK;
}


/**
 * Initialise network/sockets for the daemon.
 * Also choose events mode / sockets polling syscall.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \
  MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_init_net (struct MHD_Daemon *restrict d,
                 struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode ret;

  mhd_assert (! d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
#ifdef MHD_SOCKETS_KIND_POSIX
  d->net.cfg.max_fd_num = s->fd_number_limit;
#endif /* MHD_SOCKETS_KIND_POSIX */

  ret = daemon_choose_and_preinit_events (d, s);
  if (MHD_SC_OK != ret)
    return ret;

  mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type);

  /* No direct return of error codes is allowed beyond this point.
     Deinit/cleanup must be performed before return of any error. */

#if defined(MHD_SOCKETS_KIND_POSIX) && defined(MHD_SUPPORT_SELECT)
  /* 'select()' cannot monitor FDs with values of FD_SETSIZE and higher */
  if (mhd_POLL_TYPE_SELECT == d->events.poll_type)
  {
    if ((MHD_INVALID_SOCKET == d->net.cfg.max_fd_num) ||
        (FD_SETSIZE < d->net.cfg.max_fd_num))
      d->net.cfg.max_fd_num = FD_SETSIZE;
  }
#endif /* MHD_SOCKETS_KIND_POSIX && MHD_SUPPORT_SELECT */

  if (MHD_SC_OK == ret) /* 'ret' is always MHD_SC_OK here (early return above);
                           the check keeps the cleanup-path structure uniform */
  {
    ret = create_bind_listen_stream_socket (d, s);

    if (MHD_SC_OK == ret)
    {
      /* Edge-triggered and thread-pool modes cannot work with a blocking
         listen socket */
      if ((MHD_INVALID_SOCKET != d->net.listen.fd)
          && ! d->net.listen.non_block
          && (mhd_D_HAS_EDGE_TRIGG (d) ||
              mhd_WM_INT_IS_THREAD_POOL (d->wmode_int)))
      {
        mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_NONBLOCKING_FAILURE, \
                     "The selected daemon work mode requires listening socket "
                     "in non-blocking mode");
        ret = MHD_SC_LISTEN_SOCKET_NONBLOCKING_FAILURE;
      }

      if (MHD_SC_OK == ret)
      {
        if ((MHD_INVALID_SOCKET != d->net.listen.fd) &&
            ((0 == d->net.listen.port) ||
             (mhd_SOCKET_TYPE_UNKNOWN == d->net.listen.type)))
          detect_listen_type_and_port (d);

#ifndef NDEBUG
        d->dbg.net_inited = true;
#endif
        return MHD_SC_OK; /* Success exit point */
      }

      /* Below is a cleanup path */
      if (MHD_INVALID_SOCKET != d->net.listen.fd)
        mhd_socket_close (d->net.listen.fd);
    }
  }

#ifdef MHD_SUPPORT_EPOLL
  /* Special case for epoll: epoll FD is probed early in events
     pre-initialisation and is not moved to events epoll FD therefore
     it needs cleanup here */
  if (mhd_POLL_TYPE_EPOLL == d->events.poll_type)
  {
    mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.early_fd);
    close (d->events.data.epoll.early_fd);
  }
#endif /* MHD_SUPPORT_EPOLL */

  mhd_assert (MHD_SC_OK != ret);

  return ret;
}


/**
 * Deinitialise daemon's network data
 * @param d the daemon object
 */
MHD_FN_PAR_NONNULL_ (1) static void
daemon_deinit_net (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.net_inited);
  mhd_assert (!
d->dbg.net_deinited);
  mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type);
#ifdef MHD_SUPPORT_EPOLL
  /* Special case for epoll: epoll FD is probed early in events
     pre-initialisation and could be not moved yet to events epoll FD
     therefore it needs cleanup here */
  if ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) &&
      (MHD_INVALID_SOCKET != d->events.data.epoll.early_fd))
  {
    mhd_assert (0 < d->events.data.epoll.early_fd);
    close (d->events.data.epoll.early_fd);
  }
#endif /* MHD_SUPPORT_EPOLL */
  if (MHD_INVALID_SOCKET != d->net.listen.fd)
    mhd_socket_close (d->net.listen.fd);

#ifndef NDEBUG
  d->dbg.net_deinited = true;
#endif
}


#if 0 /* Disabled code, kept for reference */
void
dauth_init (struct MHD_Daemon *restrict d,
            struct DaemonOptions *restrict s)
{
  mhd_assert ((NULL == s->random_entropy.v_buf) || \
              (0 != s->random_entropy.v_buf_size));
  mhd_assert ((0 == s->random_entropy.v_buf_size) || \
              (NULL != s->random_entropy.v_buf));
}


#endif

#ifdef MHD_SUPPORT_AUTH_DIGEST
/**
 * Initialise daemon Digest Auth data.
 * On success takes ownership of the entropy buffer provided in @a s.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \
  MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_init_auth_digest (struct MHD_Daemon *restrict d,
                         struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode ret;
  size_t nonces_num;

  if (0 == s->random_entropy.v_buf_size)
  {
    /* No initialisation needed: without entropy Digest Auth is unused */
#ifndef HAVE_NULL_PTR_ALL_ZEROS
    d->auth_dg.entropy.data = NULL;
    d->auth_dg.nonces = NULL;
#endif
    return MHD_SC_OK;
  }
  nonces_num = s->auth_digest_map_size;
  if (0 == nonces_num)
    nonces_num = 1000; /* The default size of the nonces map */
  d->auth_dg.nonces = (struct mhd_DaemonAuthDigestNonceData *)
                      mhd_calloc (nonces_num, \
                                  sizeof(struct mhd_DaemonAuthDigestNonceData));
  if (NULL == d->auth_dg.nonces)
  {
    mhd_LOG_MSG (d, \
                 MHD_SC_DAEMON_MEM_ALLOC_FAILURE, \
                 "Failed to allocate memory for Digest Auth array");
    return MHD_SC_DAEMON_MEM_ALLOC_FAILURE;
  }
  d->auth_dg.cfg.nonces_num = nonces_num;

  if (! mhd_mutex_init (&(d->auth_dg.nonces_lock)))
  {
    mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \
                 "Failed to initialise mutex for the Digest Auth data");
    ret = MHD_SC_MUTEX_INIT_FAILURE;
  }
  else
  {
    if (! mhd_atomic_counter_init (&(d->auth_dg.num_gen_nonces), 0))
    {
      /* NOTE(review): this failure is about the atomic counter, but both
         the status code and the message say "mutex" — looks copy-pasted
         from the branch above; verify the intended wording/code */
      mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \
                   "Failed to initialise mutex for the Digest Auth data");
      ret = MHD_SC_MUTEX_INIT_FAILURE;
    }
    else
    {
      /* Move ownership of the entropy buffer */
      d->auth_dg.entropy.data = (char *) s->random_entropy.v_buf;
      d->auth_dg.entropy.size = s->random_entropy.v_buf_size;
      s->random_entropy.v_buf = NULL;
      s->random_entropy.v_buf_size = 0;

      d->auth_dg.cfg.nonce_tmout = s->auth_digest_nonce_timeout;
      if (0 == d->auth_dg.cfg.nonce_tmout)
        d->auth_dg.cfg.nonce_tmout = MHD_AUTH_DIGEST_DEF_TIMEOUT;
      d->auth_dg.cfg.def_max_nc = s->auth_digest_def_max_nc;
      if (0 == d->auth_dg.cfg.def_max_nc)
        d->auth_dg.cfg.def_max_nc = MHD_AUTH_DIGEST_DEF_MAX_NC;

      return MHD_SC_OK; /* Success exit point */
    }
    mhd_mutex_destroy_chk (&(d->auth_dg.nonces_lock));
  }

  free (d->auth_dg.nonces);
  mhd_assert (MHD_SC_OK != ret);
  return ret; /* Failure exit point */
}


/**
 * Deinitialise daemon Digest Auth data
 * @param d the daemon object
 */
MHD_FN_PAR_NONNULL_ (1) static void
daemon_deinit_auth_digest (struct MHD_Daemon *restrict d)
{
  if (0 == d->auth_dg.entropy.size)
    return; /* Digest Auth not used, nothing to deinitialise */

  mhd_assert
(NULL != d->auth_dg.entropy.data);
  free (d->auth_dg.entropy.data);
  mhd_atomic_counter_deinit (&(d->auth_dg.num_gen_nonces));
  mhd_mutex_destroy_chk (&(d->auth_dg.nonces_lock));
  mhd_assert (NULL != d->auth_dg.nonces);
  free (d->auth_dg.nonces);
}


#else  /* ! MHD_SUPPORT_AUTH_DIGEST */
/* No-op fallbacks when Digest Auth support is not compiled in */
#define daemon_init_auth_digest(d,s) (MHD_SC_OK)
#define daemon_deinit_auth_digest(d) ((void) 0)
#endif /* ! MHD_SUPPORT_AUTH_DIGEST */


/**
 * Initialise daemon TLS data
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \
  MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_init_tls (struct MHD_Daemon *restrict d,
                 struct DaemonOptions *restrict s)
{
#ifdef MHD_SUPPORT_HTTPS
  mhd_StatusCodeInt ret;
#endif /* MHD_SUPPORT_HTTPS */

  mhd_assert (! d->dbg.tls_inited);
#ifdef MHD_SUPPORT_HTTPS
  d->tls = NULL;
#endif

  if (MHD_TLS_BACKEND_NONE == s->tls)
  {
    /* Plain HTTP daemon: nothing to initialise */
#ifndef NDEBUG
    d->dbg.tls_inited = true;
#endif
    return MHD_SC_OK;
  }
#ifndef MHD_SUPPORT_HTTPS
  mhd_LOG_MSG (d, \
               MHD_SC_TLS_DISABLED, \
               "HTTPS is not supported by this MHD build");
  return MHD_SC_TLS_DISABLED;
#else  /* MHD_SUPPORT_HTTPS */
  if (1)
  {
    enum mhd_TlsBackendAvailable tls_avail;

    tls_avail = mhd_tls_is_backend_available (s);
    if (mhd_TLS_BACKEND_AVAIL_NOT_SUPPORTED == tls_avail)
    {
      mhd_LOG_MSG (d, \
                   MHD_SC_TLS_BACKEND_UNSUPPORTED, \
                   "The requested TLS backend is not supported " \
                   "by this MHD build");
      return MHD_SC_TLS_BACKEND_UNSUPPORTED;
    }
    else if (mhd_TLS_BACKEND_AVAIL_NOT_AVAILABLE == tls_avail)
    {
      mhd_LOG_MSG (d, \
                   MHD_SC_TLS_BACKEND_UNAVAILABLE, \
                   "The requested TLS backend is not available");
      return MHD_SC_TLS_BACKEND_UNAVAILABLE;
    }
  }
  ret = mhd_tls_daemon_init (d,
                             mhd_D_HAS_EDGE_TRIGG (d),
                             s,
                             &(d->tls));
  /* 'd->tls' is set if and only if the backend initialised successfully */
  mhd_assert ((MHD_SC_OK == ret) || (NULL == d->tls));
  mhd_assert ((MHD_SC_OK != ret) || (NULL != d->tls));
#ifndef NDEBUG
  d->dbg.tls_inited = (MHD_SC_OK == ret);
#endif
  return (enum MHD_StatusCode) ret;
#endif /* MHD_SUPPORT_HTTPS */
}


/**
 * Deinitialise daemon TLS data
 * @param d the daemon object
 */
MHD_FN_PAR_NONNULL_ (1) static void
daemon_deinit_tls (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.tls_inited);
#ifdef MHD_SUPPORT_HTTPS
  if (NULL != d->tls)
  {
    mhd_tls_thread_cleanup (d->tls);
    mhd_tls_daemon_deinit (d->tls);
  }
#elif defined(NDEBUG)
  (void) d; /* Mute compiler warning */
#endif
}


/**
 * Initialise large buffer tracking.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \
  MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_init_large_buf (struct MHD_Daemon *restrict d,
                       struct DaemonOptions *restrict s)
{
  mhd_assert (! mhd_D_HAS_MASTER (d));
  mhd_assert (0 != d->conns.cfg.count_limit);
  mhd_assert (0 != d->conns.cfg.mem_pool_size);

  /* SIZE_MAX is the "not configured" marker; derive a default from the
     connections' memory limits */
  d->req_cfg.large_buf.space_left = s->large_pool_size;
  if (SIZE_MAX == d->req_cfg.large_buf.space_left)
    d->req_cfg.large_buf.space_left =
      (d->conns.cfg.count_limit * d->conns.cfg.mem_pool_size) / 32; /* Use ~3% of the maximum memory used by connections */

#ifndef NDEBUG
  d->dbg.initial_lbuf_size = d->req_cfg.large_buf.space_left;
#endif

  if (! mhd_mutex_init_short (&(d->req_cfg.large_buf.lock)))
  {
    mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \
                 "Failed to initialise mutex for the global large buffer.");
    return MHD_SC_MUTEX_INIT_FAILURE;
  }
  return MHD_SC_OK;
}


/**
 * Deinitialise large buffer tracking.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
daemon_deinit_large_buf (struct MHD_Daemon *restrict d)
{
  /* All large buffer allocations must be freed / deallocated earlier */
  mhd_assert (d->dbg.initial_lbuf_size == d->req_cfg.large_buf.space_left);
  mhd_mutex_destroy_chk (&(d->req_cfg.large_buf.lock));
}


#ifdef MHD_SUPPORT_KQUEUE

/**
 * Initialise daemon's kqueue FD
 * @param d the daemon object
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
init_kqueue_fd (struct MHD_Daemon *restrict d)
{
  int kq_fd;
  mhd_assert (mhd_POLL_TYPE_KQUEUE == d->events.poll_type);
  mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
  mhd_assert (d->dbg.net_inited);
  mhd_assert (MHD_INVALID_SOCKET == d->events.data.kq.kq_fd);
  mhd_assert (NULL == d->events.data.kq.kes);
  mhd_assert (0u == d->events.data.kq.num_elements);

  kq_fd = mhd_kqueue ();
  if (0 > kq_fd)
  {
    mhd_LOG_MSG (d, MHD_SC_KQUEUE_FD_CREATE_FAILED, \
                 "Failed to create kqueue FD");
    return MHD_SC_KQUEUE_FD_CREATE_FAILED; /* Failure exit point */
  }

  if (! mhd_FD_FITS_DAEMON (d, kq_fd))
  {
    mhd_LOG_MSG (d, MHD_SC_KQUEUE_FD_OUTSIDE_OF_SET_RANGE, \
                 "The kqueue FD value is higher than allowed");
    (void) close (kq_fd);
    return MHD_SC_KQUEUE_FD_OUTSIDE_OF_SET_RANGE; /* Failure exit point */
  }

  /* Set close-on-exec manually if the platform's kqueue creation call
     cannot set it atomically (best effort, failure is only logged) */
  if (! mhd_KQUEUE_HAS_CLOEXEC_SET ())
  {
    if (!
mhd_socket_noninheritable (kq_fd)) 2044 mhd_LOG_MSG (d, MHD_SC_KQUEUE_FD_SET_NOINHERIT_FAILED, \ 2045 "Failed to make kqueue FD non-inheritable"); 2046 } 2047 d->events.data.kq.kq_fd = kq_fd; 2048 2049 return MHD_SC_OK; /* Success exit point */ 2050 } 2051 2052 2053 /** 2054 * Deinitialise daemon's kqueue FD 2055 * @param d the daemon object 2056 */ 2057 MHD_FN_PAR_NONNULL_ (1) static void 2058 deinit_kqueue_fd (struct MHD_Daemon *restrict d) 2059 { 2060 mhd_assert (mhd_POLL_TYPE_KQUEUE == d->events.poll_type); 2061 mhd_assert (0 < d->events.data.kq.kq_fd); 2062 close (d->events.data.kq.kq_fd); 2063 } 2064 2065 2066 #endif /* MHD_SUPPORT_KQUEUE */ 2067 2068 2069 /** 2070 * Initialise daemon's events polling FD (if used by polling function) 2071 * @param d the daemon object 2072 * @return #MHD_SC_OK on success, 2073 * the error code otherwise 2074 */ 2075 static MHD_FN_PAR_NONNULL_ (1) 2076 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2077 init_events_fd (struct MHD_Daemon *restrict d) 2078 { 2079 enum MHD_StatusCode res; 2080 2081 res = MHD_SC_OK; 2082 switch (d->events.poll_type) 2083 { 2084 case mhd_POLL_TYPE_EXT: 2085 break; 2086 #ifdef MHD_SUPPORT_SELECT 2087 case mhd_POLL_TYPE_SELECT: 2088 break; 2089 #endif /* MHD_SUPPORT_SELECT */ 2090 #ifdef MHD_SUPPORT_POLL 2091 case mhd_POLL_TYPE_POLL: 2092 break; 2093 #endif /* MHD_SUPPORT_POLL */ 2094 #ifdef MHD_SUPPORT_EPOLL 2095 case mhd_POLL_TYPE_EPOLL: 2096 res = init_epoll_fd (d, 2097 false, 2098 true); 2099 break; 2100 #endif /* MHD_SUPPORT_EPOLL */ 2101 #ifdef MHD_SUPPORT_KQUEUE 2102 case mhd_POLL_TYPE_KQUEUE: 2103 res = init_kqueue_fd (d); 2104 break; 2105 #endif /* MHD_SUPPORT_KQUEUE */ 2106 #ifndef MHD_SUPPORT_SELECT 2107 case mhd_POLL_TYPE_SELECT: 2108 #endif /* ! MHD_SUPPORT_SELECT */ 2109 #ifndef MHD_SUPPORT_POLL 2110 case mhd_POLL_TYPE_POLL: 2111 #endif /* ! 
MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    return MHD_SC_INTERNAL_ERROR;
  }

#ifndef NDEBUG
  if (MHD_SC_OK == res)
    d->dbg.events_fd_inited = true;
#endif /* ! NDEBUG */

  return res;
}


/**
 * Deinitialise daemon's events polling FD (if used by polling function)
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deinit_events_fd (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.events_fd_inited);

  switch (d->events.poll_type)
  {
  case mhd_POLL_TYPE_EXT:
    return; /* No special FD is used */
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    return; /* No special FD is used */
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    return; /* No special FD is used */
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    deinit_epoll_fd (d);
    return;
#endif /* MHD_SUPPORT_EPOLL */
#ifdef MHD_SUPPORT_KQUEUE
  case mhd_POLL_TYPE_KQUEUE:
    deinit_kqueue_fd (d);
    return;
#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
  }
  return;
}


/**
 * Finish initialisation of events processing: allocate the memory used
 * by the sockets polling function (if any allocation is needed).
 * @param d the daemon object
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
allocate_events (struct MHD_Daemon *restrict d)
{
#if defined(MHD_SUPPORT_POLL) || defined(MHD_SUPPORT_EPOLL)
  /**
   * The number of elements to be monitored by sockets polling function
   */
  unsigned int num_fd_elems;
  num_fd_elems = 0;
#ifdef MHD_SUPPORT_THREADS
  ++num_fd_elems; /* For the ITC */
#endif
  if (MHD_INVALID_SOCKET != d->net.listen.fd)
    ++num_fd_elems; /* For the listening socket */
  if (! mhd_D_HAS_THR_PER_CONN (d))
    num_fd_elems += d->conns.cfg.count_limit; /* May wrap; checked per-case below */
#endif /* MHD_SUPPORT_POLL || MHD_SUPPORT_EPOLL */

  mhd_assert (0 != d->conns.cfg.count_limit);
  mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
  mhd_assert (!
d->dbg.events_allocated);

  mhd_DLINKEDL_INIT_LIST (&(d->events),proc_ready);

  switch (d->events.poll_type)
  {
  case mhd_POLL_TYPE_EXT:
    mhd_assert (NULL != d->events.data.extr.cb_data.cb);
    /* Nothing to do: allocation is not needed */
#ifndef NDEBUG
    d->dbg.events_allocated = true;
#endif
    return MHD_SC_OK; /* Success exit point */
    break;
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    /* The pointers have been set to NULL during pre-initialisations of
       the events */
    mhd_assert (NULL == d->events.data.select.rfds);
    mhd_assert (NULL == d->events.data.select.wfds);
    mhd_assert (NULL == d->events.data.select.efds);
    /* Three fd_sets (read / write / except) allocated one-by-one,
       rolled back in reverse order if any allocation fails */
    d->events.data.select.rfds = (fd_set *) malloc (sizeof(fd_set));
    if (NULL != d->events.data.select.rfds)
    {
      d->events.data.select.wfds = (fd_set *) malloc (sizeof(fd_set));
      if (NULL != d->events.data.select.wfds)
      {
        d->events.data.select.efds = (fd_set *) malloc (sizeof(fd_set));
        if (NULL != d->events.data.select.efds)
        {
#ifndef NDEBUG
          d->dbg.num_events_elements = FD_SETSIZE;
          d->dbg.events_allocated = true;
#endif
          return MHD_SC_OK; /* Success exit point */
        }

        free (d->events.data.select.wfds);
      }
      free (d->events.data.select.rfds);
    }
    /* NOTE(review): the freed 'rfds'/'wfds' pointers are left non-NULL here;
       presumably harmless as daemon start is aborted — confirm */
    mhd_LOG_MSG (d, MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE, \
                 "Failed to allocate memory for fd_sets for the daemon");
    return MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE;
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    /* The pointers have been set to NULL during pre-initialisations of
       the events */
    mhd_assert (NULL == d->events.data.poll.fds);
    mhd_assert (NULL == d->events.data.poll.rel);
    /* Proceed only if 'num_fd_elems' did not wrap around (or the limit was
       intentionally not added for thread-per-connection mode) */
    if ((num_fd_elems > d->conns.cfg.count_limit) /* Check for value overflow */
        || (mhd_D_HAS_THR_PER_CONN (d)))
    {
      d->events.data.poll.fds =
        (struct pollfd *) malloc (sizeof(struct pollfd) * num_fd_elems);
      if (NULL != d->events.data.poll.fds)
      {
        d->events.data.poll.rel =
          (union mhd_SocketRelation *) malloc (sizeof(union mhd_SocketRelation)
                                               * num_fd_elems);
        if (NULL != d->events.data.poll.rel)
        {
#ifndef NDEBUG
          d->dbg.num_events_elements = num_fd_elems;
          d->dbg.events_allocated = true;
#endif
          return MHD_SC_OK; /* Success exit point */
        }
        free (d->events.data.poll.fds);
      }
    }
    mhd_LOG_MSG (d, MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE, \
                 "Failed to allocate memory for poll fds for the daemon");
    return MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE;
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    mhd_ASSUME (! mhd_D_HAS_THR_PER_CONN (d));
    /* The event FD has been created by event FD initialisation */
    mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd);
    mhd_assert (MHD_INVALID_SOCKET == d->events.data.epoll.early_fd);
    /* The pointer has been set to NULL during pre-initialisation of the
       events */
    mhd_assert (NULL == d->events.data.epoll.events);
    mhd_assert (0 == d->events.data.epoll.num_elements);
    if (1)
    {
      /* Cap the events array: 4096 entries on 64-bit, 1024 on 32-bit */
      const unsigned int upper_limit = (sizeof(void*) >= 8) ? 4096u : 1024u;

      mhd_assert (0 < (int) upper_limit);
      mhd_assert (upper_limit == (unsigned int) (size_t) upper_limit);

      if (num_fd_elems < d->conns.cfg.count_limit) /* Check for value overflow */
        num_fd_elems = upper_limit;
      /* Trade neglectable performance penalty for memory saving */
      /* Very large amount of new events processed in batches */
      else if (num_fd_elems > upper_limit)
        num_fd_elems = upper_limit;

      d->events.data.epoll.events =
        (struct epoll_event *) malloc (sizeof(struct epoll_event)
                                       * num_fd_elems);
      if (NULL != d->events.data.epoll.events)
      {
        d->events.data.epoll.num_elements = num_fd_elems;
#ifndef NDEBUG
        d->dbg.num_events_elements = num_fd_elems;
        d->dbg.events_allocated = true;
#endif
        return MHD_SC_OK; /* Success exit point */
      }
    }
    mhd_LOG_MSG (d, MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE, \
                 "Failed to allocate memory for epoll events for the daemon");
    return MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE;
    break;
#endif /* MHD_SUPPORT_EPOLL */
#ifdef MHD_SUPPORT_KQUEUE
  case mhd_POLL_TYPE_KQUEUE:
    mhd_ASSUME (! mhd_D_HAS_THR_PER_CONN (d));
    /* The event FD has been created by event FD initialisation */
    mhd_assert (0 < d->events.data.kq.kq_fd);
    /* The pointer has been set to NULL during pre-initialisation of the
       events */
    mhd_assert (NULL == d->events.data.kq.kes);
    mhd_assert (0u == d->events.data.kq.num_elements);
    if (1)
    {
      /* Cap the events array: 4096 entries on 64-bit, 1024 on 32-bit */
      const unsigned int upper_limit = (sizeof(void*) >= 8) ? 4096u : 1024u;
      unsigned int num_event_elems = 0;

      mhd_assert (0 < (int) upper_limit);
      mhd_assert (upper_limit == (unsigned int) (size_t) upper_limit);

#ifdef MHD_SUPPORT_THREADS
      ++num_event_elems; /* For the ITC */
#endif
      if (MHD_INVALID_SOCKET != d->net.listen.fd)
        ++num_event_elems; /* For the listening socket */

      /* kqueue needs slots for individual combinations of FD + filter
         (send/recv) */
      num_event_elems += 2 * d->conns.cfg.count_limit;
      if (d->conns.cfg.count_limit > (num_event_elems / 2)) /* Check for overflow */
        num_event_elems = upper_limit;
      /* Trade neglectable performance penalty for memory saving */
      /* Very large amount of new events processed in batches */
      else if (upper_limit < num_event_elems)
        num_event_elems = upper_limit;

      /* Make sure that run-time overflow check is easy */
      mhd_assert (((int) num_event_elems) > 0);
      mhd_assert (((int) (num_event_elems + 1)) > 0);

      d->events.data.kq.kes =
        (struct kevent *) malloc (sizeof(struct kevent) * num_event_elems);
      if (NULL != d->events.data.kq.kes)
      {
        d->events.data.kq.num_elements = num_event_elems;
#ifndef NDEBUG
        d->dbg.num_events_elements = num_event_elems;
        d->dbg.events_allocated = true;
#endif
        return MHD_SC_OK; /* Success exit point */
      }
    }
    mhd_LOG_MSG (d, MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE, \
                 "Failed to allocate memory for kqueue events for the daemon");
    return MHD_SC_EVENTS_MEMORY_ALLOCATE_FAILURE;
    break;
#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    break;
  }
  mhd_UNREACHABLE ();
  return MHD_SC_INTERNAL_ERROR;
}


/**
 * Deallocate events data
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deallocate_events (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.events_allocated);
  mhd_assert (0 != d->conns.cfg.count_limit);
  mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
  switch (d->events.poll_type)
  {
  case mhd_POLL_TYPE_EXT:
    break; /* Nothing was allocated */
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    mhd_assert (NULL != d->events.data.select.efds);
    mhd_assert (NULL != d->events.data.select.wfds);
    mhd_assert (NULL != d->events.data.select.rfds);
    free (d->events.data.select.efds);
    free (d->events.data.select.wfds);
    free (d->events.data.select.rfds);
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    mhd_assert (NULL != d->events.data.poll.rel);
    mhd_assert (NULL != d->events.data.poll.fds);
    free (d->events.data.poll.rel);
    free (d->events.data.poll.fds);
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    mhd_assert (0 != d->events.data.epoll.num_elements);
    mhd_assert (NULL != d->events.data.epoll.events);
    free (d->events.data.epoll.events);
    break;
#endif /* MHD_SUPPORT_EPOLL */
#ifdef MHD_SUPPORT_KQUEUE
  case mhd_POLL_TYPE_KQUEUE:
    mhd_assert (0 != d->events.data.kq.num_elements);
    mhd_assert (NULL != d->events.data.kq.kes);
    free (d->events.data.kq.kes);
    break;
#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* !
MHD_SUPPORT_POLL */ 2436 case mhd_POLL_TYPE_NOT_SET_YET: 2437 default: 2438 mhd_UNREACHABLE_D ("Wrong workflow"); 2439 } 2440 #ifndef NDEBUG 2441 d->dbg.events_allocated = false; 2442 #endif 2443 return; 2444 } 2445 2446 2447 /** 2448 * Initialise daemon's ITC 2449 * @param d the daemon object 2450 * @return #MHD_SC_OK on success, 2451 * the error code otherwise 2452 */ 2453 static MHD_FN_PAR_NONNULL_ (1) 2454 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2455 init_itc (struct MHD_Daemon *restrict d) 2456 { 2457 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2458 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2459 #ifdef MHD_SUPPORT_THREADS 2460 // TODO: add and process "thread unsafe" daemon's option 2461 if (! mhd_itc_init (&(d->threading.itc))) 2462 { 2463 #if defined(MHD_ITC_EVENTFD_) 2464 mhd_LOG_MSG ( \ 2465 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2466 "Failed to initialise eventFD for inter-thread communication"); 2467 #elif defined(MHD_ITC_PIPE_) 2468 mhd_LOG_MSG ( \ 2469 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2470 "Failed to create a pipe for inter-thread communication"); 2471 #elif defined(MHD_ITC_SOCKETPAIR_) 2472 mhd_LOG_MSG ( \ 2473 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2474 "Failed to create a socketpair for inter-thread communication"); 2475 #else 2476 #warning Missing expicit handling of the ITC type 2477 mhd_LOG_MSG ( \ 2478 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2479 "Failed to initialise inter-thread communication"); 2480 #endif 2481 return MHD_SC_ITC_INITIALIZATION_FAILED; 2482 } 2483 if (! mhd_FD_FITS_DAEMON (d,mhd_itc_r_fd (d->threading.itc))) 2484 { 2485 mhd_LOG_MSG (d, MHD_SC_ITC_FD_OUTSIDE_OF_SET_RANGE, \ 2486 "The inter-thread communication FD value is " \ 2487 "higher than allowed"); 2488 (void) mhd_itc_destroy (d->threading.itc); 2489 mhd_itc_set_invalid (&(d->threading.itc)); 2490 return MHD_SC_ITC_FD_OUTSIDE_OF_SET_RANGE; 2491 } 2492 #else /* ! 
MHD_SUPPORT_THREADS */ 2493 (void) d; /* Unused */ 2494 #endif /* ! MHD_SUPPORT_THREADS */ 2495 return MHD_SC_OK; 2496 } 2497 2498 2499 /** 2500 * Deallocate events data 2501 * @param d the daemon object 2502 */ 2503 static MHD_FN_PAR_NONNULL_ (1) void 2504 deinit_itc (struct MHD_Daemon *restrict d) 2505 { 2506 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2507 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2508 #ifdef MHD_SUPPORT_THREADS 2509 // TODO: add and process "thread unsafe" daemon's option 2510 mhd_assert (! mhd_ITC_IS_INVALID (d->threading.itc)); 2511 (void) mhd_itc_destroy (d->threading.itc); 2512 #else /* ! MHD_SUPPORT_THREADS */ 2513 (void) d; /* Unused */ 2514 #endif /* ! MHD_SUPPORT_THREADS */ 2515 } 2516 2517 2518 /** 2519 * The final part of events initialisation: pre-add ITC and listening FD to 2520 * the monitored items (if supported by monitoring syscall). 2521 * @param d the daemon object 2522 * @return #MHD_SC_OK on success, 2523 * the error code otherwise 2524 */ 2525 static MHD_FN_PAR_NONNULL_ (1) 2526 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2527 init_daemon_fds_monitoring (struct MHD_Daemon *restrict d) 2528 { 2529 mhd_assert (d->dbg.net_inited); 2530 mhd_assert (! d->dbg.net_deinited); 2531 mhd_assert (d->dbg.events_allocated); 2532 mhd_assert (! d->dbg.events_fully_inited); 2533 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2534 #ifdef MHD_SUPPORT_THREADS 2535 mhd_assert (mhd_ITC_IS_VALID (d->threading.itc)); 2536 #endif 2537 2538 d->events.accept_pending = false; 2539 2540 switch (d->events.poll_type) 2541 { 2542 case mhd_POLL_TYPE_EXT: 2543 mhd_assert (NULL != d->events.data.extr.cb_data.cb); 2544 #ifdef MHD_SUPPORT_THREADS 2545 d->events.data.extr.itc_data.is_active = false; 2546 d->events.data.extr.itc_data.is_broken = false; 2547 #endif /* MHD_SUPPORT_THREADS */ 2548 if (! 
d->events.data.extr.reg_all)
    {
      bool itc_reg_succeed;

      /* Register daemon's FDs now */
#ifdef MHD_SUPPORT_THREADS
      d->events.data.extr.itc_data.app_cntx =
        mhd_daemon_extr_event_reg (d,
                                   mhd_itc_r_fd (d->threading.itc),
                                   MHD_FD_STATE_RECV_EXCEPT,
                                   NULL,
                                   (struct MHD_EventUpdateContext *)
                                   mhd_SOCKET_REL_MARKER_ITC);
      itc_reg_succeed = (NULL != d->events.data.extr.itc_data.app_cntx);
#else  /* ! MHD_SUPPORT_THREADS */
      itc_reg_succeed = true; /* No ITC to register */
#endif /* ! MHD_SUPPORT_THREADS */
      if (itc_reg_succeed)
      {
        if (MHD_INVALID_SOCKET == d->net.listen.fd)
        {
          d->events.data.extr.listen_data.app_cntx = NULL;
          return MHD_SC_OK; /* Success exit point */
        }

        /* Need to register the listen FD */
        d->events.data.extr.listen_data.app_cntx =
          mhd_daemon_extr_event_reg (d,
                                     d->net.listen.fd,
                                     MHD_FD_STATE_RECV_EXCEPT,
                                     NULL,
                                     (struct MHD_EventUpdateContext *)
                                     mhd_SOCKET_REL_MARKER_LISTEN);
        if (NULL != d->events.data.extr.listen_data.app_cntx)
          return MHD_SC_OK; /* Success exit point */

        /* Below is a clean-up path for 'case mhd_POLL_TYPE_EXT:' */
#ifdef MHD_SUPPORT_THREADS
        /* De-register ITC FD */
        (void) mhd_daemon_extr_event_reg (d,
                                          mhd_itc_r_fd (d->threading.itc),
                                          MHD_FD_STATE_NONE,
                                          d->events.data.extr.itc_data.app_cntx,
                                          (struct MHD_EventUpdateContext *)
                                          mhd_SOCKET_REL_MARKER_ITC);
        d->events.data.extr.itc_data.app_cntx = NULL;
#endif /* MHD_SUPPORT_THREADS */
      }

      mhd_LOG_MSG (d, MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE, \
                   "Failed to register daemon FDs in the application "
                   "(external events) monitoring.");
      return MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE;
    }
    else
    {
      /* Daemons FDs are repeatedly registered every processing cycle */
#ifdef MHD_SUPPORT_THREADS
      d->events.data.extr.itc_data.app_cntx = NULL;
#endif /* MHD_SUPPORT_THREADS */
      d->events.data.extr.listen_data.app_cntx = NULL;
      return MHD_SC_OK; /* Success exit point */
    }
    break;
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    mhd_assert (NULL != d->events.data.select.rfds);
    mhd_assert (NULL != d->events.data.select.wfds);
    mhd_assert (NULL != d->events.data.select.efds);
    /* Nothing to do when using 'select()' */
    return MHD_SC_OK;
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    mhd_assert (NULL != d->events.data.poll.fds);
    mhd_assert (NULL != d->events.data.poll.rel);
    /* Pre-fill the first fixed slot(s): ITC first, then listening socket */
    if (1)
    {
      unsigned int i;
      i = 0;
#ifdef MHD_SUPPORT_THREADS
      d->events.data.poll.fds[i].fd = mhd_itc_r_fd (d->threading.itc);
      d->events.data.poll.fds[i].events = POLLIN;
      d->events.data.poll.rel[i].fd_id = mhd_SOCKET_REL_MARKER_ITC;
      ++i;
#endif
      if (MHD_INVALID_SOCKET != d->net.listen.fd)
      {
        d->events.data.poll.fds[i].fd = d->net.listen.fd;
        d->events.data.poll.fds[i].events = POLLIN;
        d->events.data.poll.rel[i].fd_id = mhd_SOCKET_REL_MARKER_LISTEN;
      }
    }
    return MHD_SC_OK;
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd);
    mhd_assert (MHD_INVALID_SOCKET == d->events.data.epoll.early_fd);
    mhd_assert (NULL != d->events.data.epoll.events);
    mhd_assert (0 < d->events.data.epoll.num_elements);
    if (1)
    {
      struct epoll_event reg_event;
#ifdef MHD_SUPPORT_THREADS
      /* ITC is registered edge-triggered */
      reg_event.events = EPOLLIN | EPOLLET;
      reg_event.data.u64 = (uint64_t) mhd_SOCKET_REL_MARKER_ITC; /* uint64_t is used in the epoll header */
      if (0 != epoll_ctl (d->events.data.epoll.e_fd, EPOLL_CTL_ADD,
                          mhd_itc_r_fd (d->threading.itc), &reg_event))
      {
        mhd_LOG_MSG (d, MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE, \
                     "Failed to add ITC FD to the epoll monitoring.");
        return MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE;
      }
      mhd_dbg_print_fd_mon_req ("ITC", \
                                mhd_itc_r_fd (d->threading.itc), \
                                true, \
                                false, \
                                false);
#endif
      if (MHD_INVALID_SOCKET != d->net.listen.fd)
      {
        /* Listening socket is registered level-triggered */
        reg_event.events = EPOLLIN;
        reg_event.data.u64 = (uint64_t) mhd_SOCKET_REL_MARKER_LISTEN; /* uint64_t is used in the epoll header */
        if (0 != epoll_ctl (d->events.data.epoll.e_fd, EPOLL_CTL_ADD,
                            d->net.listen.fd, &reg_event))
        {
          mhd_LOG_MSG (d, MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE, \
                       "Failed to add listening FD to the epoll monitoring.");
          return MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE;
        }
        mhd_dbg_print_fd_mon_req ("lstn", \
                                  d->net.listen.fd, \
                                  true, \
                                  false, \
                                  false);
      }
    }
    return MHD_SC_OK;
    break;
#endif /* MHD_SUPPORT_EPOLL */
#ifdef MHD_SUPPORT_KQUEUE
  case mhd_POLL_TYPE_KQUEUE:
    mhd_assert (0 < d->events.data.kq.kq_fd);
    mhd_assert (NULL != d->events.data.kq.kes);
    mhd_assert (2u < d->events.data.kq.num_elements);
    if (1)
    {
      int num_elemnts;

      num_elemnts = 0;
#ifdef MHD_SUPPORT_THREADS
      mhd_KE_SET (d->events.data.kq.kes + num_elemnts,
                  mhd_itc_r_fd (d->threading.itc),
                  EVFILT_READ,
                  EV_ADD, /* level trigger */
                  mhd_SOCKET_REL_PTRMARKER_ITC);
      mhd_dbg_print_kevent_change ("ITC",
                                   d->events.data.kq.kes + num_elemnts);
      ++num_elemnts;
#endif
      if (MHD_INVALID_SOCKET != d->net.listen.fd)
      {
        mhd_KE_SET (d->events.data.kq.kes + num_elemnts,
                    d->net.listen.fd,
                    EVFILT_READ,
                    EV_ADD, /* level trigger */
                    mhd_SOCKET_REL_PTRMARKER_LISTEN);

        mhd_dbg_print_kevent_change ("lstn",
                                     d->events.data.kq.kes + num_elemnts);
        ++num_elemnts;
      }

      if (0 != num_elemnts)
      {
        /* Apply the change list only; zero timeout so no events are
           retrieved here */
        static const struct timespec zero_timeout = {0, 0};
        int res;

#ifdef MHD_USE_TRACE_POLLING_FDS
        fprintf (stderr,
                 "### (Starting) kevent(%d, changes, %d, [NULL], "
                 "0, [0, 0])...\n",
                 d->events.data.kq.kq_fd,
                 num_elemnts);
#endif /* MHD_USE_TRACE_POLLING_FDS */
        res = kevent (d->events.data.kq.kq_fd,
                      d->events.data.kq.kes,
                      num_elemnts,
                      NULL,
                      0,
                      &zero_timeout);
#ifdef MHD_USE_TRACE_POLLING_FDS
        fprintf (stderr,
                 "### (Finished) kevent(%d, changes, %d, [NULL], "
                 "0, [0, 0]) -> %d\n",
                 d->events.data.kq.kq_fd,
                 num_elemnts,
                 res);
#endif /* MHD_USE_TRACE_POLLING_FDS */
        if (0 != res)
        {
          mhd_LOG_MSG (d, MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE, \
                       "Failed to add ITC or listening FD to the "
                       "kqueue monitoring.");
          return MHD_SC_EVENTS_REG_DAEMON_FDS_FAILURE;
        }
      }
    }
    return MHD_SC_OK;
    break;
#endif /* MHD_SUPPORT_KQUEUE */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    break;
  }
  mhd_UNREACHABLE ();
  return MHD_SC_INTERNAL_ERROR;
}


/**
 * The initial part of events de-initialisation: remove ITC and listening FD
 * from the monitored items (if supported by monitoring syscall).
2781 * @param d the daemon object 2782 */ 2783 static MHD_FN_PAR_NONNULL_ (1) void 2784 deinit_daemon_fds_monitoring (struct MHD_Daemon *restrict d) 2785 { 2786 mhd_assert (d->dbg.events_fully_inited); 2787 2788 switch (d->events.poll_type) 2789 { 2790 case mhd_POLL_TYPE_EXT: 2791 if (NULL != d->events.data.extr.listen_data.app_cntx) 2792 (void) mhd_daemon_extr_event_reg ( 2793 d, 2794 d->net.listen.fd, 2795 MHD_FD_STATE_NONE, 2796 d->events.data.extr.listen_data.app_cntx, 2797 (struct MHD_EventUpdateContext *) mhd_SOCKET_REL_MARKER_LISTEN); 2798 #ifdef MHD_SUPPORT_THREADS 2799 if (NULL != d->events.data.extr.itc_data.app_cntx) 2800 (void) mhd_daemon_extr_event_reg (d, 2801 mhd_itc_r_fd (d->threading.itc), 2802 MHD_FD_STATE_NONE, 2803 d->events.data.extr.itc_data.app_cntx, 2804 (struct MHD_EventUpdateContext *) 2805 mhd_SOCKET_REL_MARKER_ITC); 2806 #endif /* MHD_SUPPORT_THREADS */ 2807 return; 2808 #ifdef MHD_SUPPORT_SELECT 2809 case mhd_POLL_TYPE_SELECT: 2810 /* Nothing to do when using 'select()' */ 2811 return; 2812 break; 2813 #endif /* MHD_SUPPORT_SELECT */ 2814 #ifdef MHD_SUPPORT_POLL 2815 case mhd_POLL_TYPE_POLL: 2816 /* Nothing to do when using 'poll()' */ 2817 return; 2818 break; 2819 #endif /* MHD_SUPPORT_POLL */ 2820 #ifdef MHD_SUPPORT_EPOLL 2821 case mhd_POLL_TYPE_EPOLL: 2822 /* Nothing to do when using epoll. 2823 Monitoring stopped by closing epoll FD. */ 2824 return; 2825 break; 2826 #endif /* MHD_SUPPORT_EPOLL */ 2827 #ifdef MHD_SUPPORT_KQUEUE 2828 case mhd_POLL_TYPE_KQUEUE: 2829 /* Nothing to do when using kqueue. 2830 Monitoring stopped by closing kqueue FD. */ 2831 return; 2832 break; 2833 #endif /* MHD_SUPPORT_EPOLL */ 2834 #ifndef MHD_SUPPORT_SELECT 2835 case mhd_POLL_TYPE_SELECT: 2836 #endif /* ! MHD_SUPPORT_SELECT */ 2837 #ifndef MHD_SUPPORT_POLL 2838 case mhd_POLL_TYPE_POLL: 2839 #endif /* ! 
MHD_SUPPORT_POLL */ 2840 case mhd_POLL_TYPE_NOT_SET_YET: 2841 default: 2842 mhd_UNREACHABLE (); 2843 break; 2844 } 2845 mhd_UNREACHABLE (); 2846 } 2847 2848 2849 /** 2850 * Initialise daemon connections' data. 2851 * @param d the daemon object 2852 * @param s the user settings 2853 * @return #MHD_SC_OK on success, 2854 * the error code otherwise 2855 */ 2856 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 2857 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2858 init_individual_conns (struct MHD_Daemon *restrict d, 2859 struct DaemonOptions *restrict s) 2860 { 2861 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2862 mhd_assert (0 != d->conns.cfg.count_limit); 2863 2864 mhd_DLINKEDL_INIT_LIST (&(d->conns),all_conn); 2865 mhd_DLINKEDL_INIT_LIST (&(d->conns),def_timeout); 2866 mhd_DLINKEDL_INIT_LIST (&(d->conns),cust_timeout); 2867 d->conns.count = 0; 2868 d->conns.block_new = false; 2869 2870 d->conns.cfg.mem_pool_size = s->conn_memory_limit; 2871 if (0 == d->conns.cfg.mem_pool_size) 2872 d->conns.cfg.mem_pool_size = 32 * 1024; 2873 else if (256 > d->conns.cfg.mem_pool_size) 2874 d->conns.cfg.mem_pool_size = 256; 2875 2876 switch (s->conn_buff_zeroing) 2877 { 2878 case MHD_CONN_BUFFER_ZEROING_DISABLED: 2879 d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_NEVER; 2880 break; 2881 case MHD_CONN_BUFFER_ZEROING_BASIC: 2882 d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_ON_RESET; 2883 break; 2884 case MHD_CONN_BUFFER_ZEROING_HEAVY: 2885 default: 2886 d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_ALWAYS; 2887 break; 2888 } 2889 2890 #ifdef MHD_SUPPORT_UPGRADE 2891 mhd_DLINKEDL_INIT_LIST (&(d->conns.upgr),upgr_cleanup); 2892 if (! 
mhd_mutex_init (&(d->conns.upgr.ucu_lock)))
  {
    mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \
                 "Failed to initialise mutex for the upgraded " \
                 "connection list.");
    return MHD_SC_MUTEX_INIT_FAILURE;
  }
#endif /* MHD_SUPPORT_UPGRADE */

#ifndef NDEBUG
  d->dbg.connections_inited = true;
#endif
  return MHD_SC_OK;
}


/**
 * Deinitialise daemon connections' data.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deinit_individual_conns (struct MHD_Daemon *restrict d)
{
#ifdef MHD_SUPPORT_UPGRADE
  /* All upgraded connections must be cleaned-up earlier */
  mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns.upgr),upgr_cleanup));
  mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns.upgr),upgr_cleanup));

  mhd_mutex_destroy_chk (&(d->conns.upgr.ucu_lock));
#endif /* MHD_SUPPORT_UPGRADE */

  /* All connections must be closed and removed from the lists earlier */
  mhd_assert (0 != d->conns.cfg.mem_pool_size);
  mhd_assert (0 == d->conns.count);
  mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),cust_timeout));
  mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),cust_timeout));
  mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),def_timeout));
  mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),def_timeout));
  mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn));
  mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn));
}


/**
 * Prepare daemon-local (worker daemon for thread pool mode) threading data
 * and finish events initialising.
 * To be used only with non-master daemons.
 * Do not start the thread even if configured for the internal threads.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
init_individual_thread_data_events_conns (struct MHD_Daemon *restrict d,
                                          struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode res;
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
  mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
  mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (! d->dbg.connections_inited);

  res = init_events_fd (d);
  if (MHD_SC_OK != res)
    return res;

  /* Each successful step below is rolled back in reverse order if a
     later step fails */
  res = allocate_events (d);
  if (MHD_SC_OK == res)
  {
    res = init_itc (d);
    if (MHD_SC_OK == res)
    {
      res = init_daemon_fds_monitoring (d);

      if (MHD_SC_OK == res)
      {
#ifndef NDEBUG
        d->dbg.events_fully_inited = true;
#endif
#ifdef MHD_SUPPORT_THREADS
        mhd_thread_handle_ID_set_invalid (&(d->threading.tid));
        d->threading.stop_requested = false;
#endif /* MHD_SUPPORT_THREADS */
#ifndef NDEBUG
        d->dbg.threading_inited = true;
#endif

        res = init_individual_conns (d, s);
        if (MHD_SC_OK == res)
          return MHD_SC_OK; /* Success exit point */

        /* Below is a clean-up path */

        deinit_daemon_fds_monitoring (d);
      }
      deinit_itc (d);
    }
    deallocate_events (d);
  }
  deinit_events_fd (d);

  mhd_assert (MHD_SC_OK != res);
  return res;
}


/**
 * Deinit daemon-local (worker daemon for thread pool mode) threading data
 * and deallocate events.
 * To be used only with non-master daemons.
 * The internal thread (if any) must be stopped already.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deinit_individual_thread_data_events_conns (struct MHD_Daemon *restrict d)
{
  /* De-initialise in exactly the reverse order of
     init_individual_thread_data_events_conns() */
  deinit_individual_conns (d);
  deinit_daemon_fds_monitoring (d);
  deinit_itc (d);
  deallocate_events (d);
  deinit_events_fd (d);
  mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn));
  mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->events),proc_ready));
#ifndef NDEBUG
  d->dbg.events_fully_inited = false;
#endif
}


/**
 * Initialise the data specific only for the worker daemon.
 * @param d the daemon object
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
init_worker_only_data (struct MHD_Daemon *restrict d)
{
  enum MHD_StatusCode res;
  struct mhd_DaemonExtAddedConnectionsWorker *worker_only_data =
    &(d->events.act_req.ext_added.worker);

  mhd_assert (! mhd_D_HAS_WORKERS (d));
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.worker_only_inited);
  mhd_assert (mhd_D_HAS_MASTER (d) || ! d->dbg.master_only_inited);
  mhd_assert (! mhd_D_HAS_MASTER (d) || d->dbg.master_only_inited); /* Copied from master daemon */

#ifndef NDEBUG
  /* "master"-only data will be overwritten here without de-initialising */
  d->dbg.master_only_inited = false;
#endif /* ! NDEBUG */

  /* The queue of externally added connections waiting to be picked up by
     this worker; protected by 'q_lock' */
  mhd_DLINKEDL_INIT_LIST (worker_only_data, queue);

  if (! mhd_mutex_init (&(worker_only_data->q_lock)))
  {
    mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \
                 "Failed to initialise mutex for externally added "
                 "connections");
    res = MHD_SC_MUTEX_INIT_FAILURE;
  }
  else
    res = MHD_SC_OK;

#ifndef NDEBUG
  if (MHD_SC_OK == res)
    d->dbg.worker_only_inited = true;
#endif /* ! NDEBUG */

  return res;
}


/**
 * De-initialise the data specific only for the worker daemon.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deinit_worker_only_data (struct MHD_Daemon *restrict d)
{
  struct mhd_DaemonExtAddedConnectionsWorker *worker_only_data =
    &(d->events.act_req.ext_added.worker);
  struct mhd_DaemonExtAddedConn *q_e;

  mhd_assert (! mhd_D_HAS_WORKERS (d));
  mhd_assert (d->dbg.net_inited);
  mhd_assert (d->dbg.worker_only_inited);
  mhd_assert (! d->dbg.master_only_inited);

  /* Clean-up all unprocessed entries */

  /* Each iteration unlinks the current head, closes its socket and frees
     the entry; the loop ends when the list is empty */
  for (q_e = mhd_DLINKEDL_GET_FIRST (worker_only_data, queue);
       NULL != q_e;
       q_e = mhd_DLINKEDL_GET_FIRST (worker_only_data, queue))
  {
    mhd_ASSUME (NULL == mhd_DLINKEDL_GET_PREV (q_e, queue));
    mhd_DLINKEDL_DEL (worker_only_data, q_e, queue);
    mhd_socket_close (q_e->skt);

    if (NULL != q_e->addr)
      free (q_e->addr);

    free (q_e);
  }

  mhd_mutex_destroy_chk (&(worker_only_data->q_lock));

#ifndef NDEBUG
  d->dbg.worker_only_inited = false;
#endif /* ! NDEBUG */
}


/**
 * Initialise worker daemon (the only daemon or member of the worker pool)
 * worker-specific daemon data, individual thread data and finish events
 * initialising.
 * To be used only with non-master daemons.
 * Do not start the thread even if configured for the internal threads.
 * @param d the daemon object
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
init_worker (struct MHD_Daemon *restrict d,
             struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode res;
  mhd_assert (!
mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3123 3124 res = init_worker_only_data (d); 3125 3126 if (MHD_SC_OK == res) 3127 { 3128 res = init_individual_thread_data_events_conns (d, 3129 s); 3130 3131 if (MHD_SC_OK == res) 3132 return MHD_SC_OK; 3133 3134 /* Below is a clean-up path */ 3135 3136 deinit_worker_only_data (d); 3137 } 3138 3139 mhd_assert (MHD_SC_OK != res); 3140 3141 return res; 3142 } 3143 3144 3145 /** 3146 * De-initialise worker daemon (the only daemon or member of the worker pool) 3147 * worker-specific daemon data, individual thread data and finish events 3148 * initialising. 3149 * To be used only with non-master daemons. 3150 * The internal thread (is any) must be stopped already. 3151 * @param d the daemon object 3152 */ 3153 static MHD_FN_PAR_NONNULL_ (1) void 3154 deinit_worker (struct MHD_Daemon *restrict d) 3155 { 3156 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3157 3158 deinit_individual_thread_data_events_conns (d); 3159 3160 deinit_worker_only_data (d); 3161 } 3162 3163 3164 /** 3165 * Set the maximum number of handled connections for the daemon. 3166 * Works only for global limit, does not work for the worker daemon. 3167 * @param d the daemon object 3168 * @param s the user settings 3169 * @return #MHD_SC_OK on success, 3170 * the error code otherwise 3171 */ 3172 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 3173 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 3174 set_connections_total_limits (struct MHD_Daemon *restrict d, 3175 struct DaemonOptions *restrict s) 3176 { 3177 unsigned int limit_by_conf; 3178 unsigned int limit_by_num; 3179 unsigned int limit_by_select; 3180 unsigned int resulting_limit; 3181 bool error_by_fd_setsize; 3182 unsigned int num_worker_daemons; 3183 3184 mhd_assert (! 
mhd_D_HAS_MASTER (d)); 3185 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 3186 3187 num_worker_daemons = 1; 3188 #ifdef MHD_SUPPORT_THREADS 3189 if (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL == d->wmode_int) 3190 { 3191 mhd_assert (MHD_WM_WORKER_THREADS == s->work_mode.mode); 3192 if ((0 != s->global_connection_limit) && 3193 (0 != s->work_mode.params.num_worker_threads) && 3194 (s->global_connection_limit < s->work_mode.params.num_worker_threads)) 3195 { 3196 mhd_LOG_MSG ( \ 3197 d, MHD_SC_CONFIGURATION_CONN_LIMIT_TOO_SMALL, \ 3198 "The limit specified by MHD_D_O_GLOBAL_CONNECTION_LIMIT is smaller " \ 3199 "then the number of worker threads."); 3200 return MHD_SC_CONFIGURATION_CONN_LIMIT_TOO_SMALL; 3201 } 3202 } 3203 if (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)) 3204 num_worker_daemons = s->work_mode.params.num_worker_threads; 3205 #endif /* MHD_SUPPORT_THREADS */ 3206 3207 limit_by_conf = s->global_connection_limit; 3208 limit_by_num = UINT_MAX; 3209 limit_by_select = UINT_MAX; 3210 mhd_assert (UINT_MAX == limit_by_num); /* Mute analyser warning */ 3211 3212 error_by_fd_setsize = false; 3213 #ifdef MHD_SOCKETS_KIND_POSIX 3214 if (1) 3215 { 3216 limit_by_num = (unsigned int) d->net.cfg.max_fd_num; 3217 if (0 != limit_by_num) 3218 { 3219 /* Find the upper limit. 
3220 The real limit is lower, as any other process FDs will use the slots 3221 in the allowed numbers range */ 3222 limit_by_num -= 3; /* The numbers zero, one and two are used typically */ 3223 #ifdef MHD_SUPPORT_THREADS 3224 limit_by_num -= mhd_ITC_NUM_FDS * num_worker_daemons; 3225 #endif /* MHD_SUPPORT_THREADS */ 3226 if (MHD_INVALID_SOCKET != d->net.listen.fd) 3227 --limit_by_num; /* One FD is used for the listening socket */ 3228 if ((num_worker_daemons > limit_by_num) || 3229 (limit_by_num > (unsigned int) d->net.cfg.max_fd_num) /* Underflow */) 3230 { 3231 if (d->net.cfg.max_fd_num == s->fd_number_limit) 3232 { 3233 mhd_LOG_MSG ( \ 3234 d, MHD_SC_MAX_FD_NUMBER_LIMIT_TOO_STRICT, \ 3235 "The limit specified by MHD_D_O_FD_NUMBER_LIMIT is too strict " \ 3236 "for this daemon settings."); 3237 return MHD_SC_MAX_FD_NUMBER_LIMIT_TOO_STRICT; 3238 } 3239 else 3240 { 3241 mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type); 3242 error_by_fd_setsize = true; 3243 } 3244 } 3245 } 3246 else 3247 limit_by_num = (unsigned int) INT_MAX; 3248 } 3249 #elif defined(MHD_SOCKETS_KIND_WINSOCK) 3250 if (1) 3251 { 3252 #ifdef MHD_SUPPORT_SELECT 3253 if ((mhd_DAEMON_TYPE_SINGLE == d->threading.d_type) && 3254 (mhd_POLL_TYPE_SELECT == d->events.poll_type)) 3255 { 3256 /* W32 limits the total number (count) of sockets used for select() */ 3257 unsigned int limit_per_worker; 3258 3259 limit_per_worker = FD_SETSIZE; 3260 if (MHD_INVALID_SOCKET != d->net.listen.fd) 3261 --limit_per_worker; /* The slot for the listening socket */ 3262 #ifdef MHD_SUPPORT_THREADS 3263 --limit_per_worker; /* The slot for the ITC */ 3264 #endif /* MHD_SUPPORT_THREADS */ 3265 if ((0 == limit_per_worker) || (limit_per_worker > FD_SETSIZE)) 3266 error_by_fd_setsize = true; 3267 else 3268 { 3269 limit_by_select = limit_per_worker * num_worker_daemons; 3270 if (limit_by_select / limit_per_worker != num_worker_daemons) 3271 limit_by_select = UINT_MAX; 3272 } 3273 } 3274 #endif /* MHD_SUPPORT_SELECT */ 3275 
(void) 0; /* Mute compiler warning */ 3276 } 3277 #endif /* MHD_SOCKETS_KIND_POSIX */ 3278 if (error_by_fd_setsize) 3279 { 3280 mhd_LOG_MSG ( \ 3281 d, MHD_SC_SYS_FD_SETSIZE_TOO_STRICT, \ 3282 "The FD_SETSIZE is too strict to run daemon with the polling " \ 3283 "by select() and with the specified number of workers."); 3284 return MHD_SC_SYS_FD_SETSIZE_TOO_STRICT; 3285 } 3286 3287 if (0 != limit_by_conf) 3288 { 3289 /* The number has bet set explicitly */ 3290 resulting_limit = limit_by_conf; 3291 } 3292 else 3293 { 3294 /* No user configuration provided */ 3295 unsigned int suggested_limit; 3296 #ifndef MHD_SOCKETS_KIND_WINSOCK 3297 #define TYPICAL_NOFILES_LIMIT (1024) /* The usual limit for the number of open FDs */ 3298 suggested_limit = TYPICAL_NOFILES_LIMIT; 3299 suggested_limit -= 3; /* The numbers zero, one and two are used typically */ 3300 #ifdef MHD_SUPPORT_THREADS 3301 suggested_limit -= mhd_ITC_NUM_FDS * num_worker_daemons; 3302 #endif /* MHD_SUPPORT_THREADS */ 3303 if (MHD_INVALID_SOCKET != d->net.listen.fd) 3304 --suggested_limit; /* One FD is used for the listening socket */ 3305 if (suggested_limit > TYPICAL_NOFILES_LIMIT) 3306 suggested_limit = 0; /* Overflow */ 3307 #else /* MHD_SOCKETS_KIND_WINSOCK */ 3308 #ifdef _WIN64 3309 suggested_limit = 2048; 3310 #else 3311 suggested_limit = 1024; 3312 #endif 3313 #endif /* MHD_SOCKETS_KIND_WINSOCK */ 3314 if (suggested_limit < num_worker_daemons) 3315 { 3316 /* Use at least one connection for every worker daemon and 3317 let the system to restrict the new connections if they are above 3318 the system limits. 
*/ 3319 suggested_limit = num_worker_daemons; 3320 } 3321 resulting_limit = suggested_limit; 3322 } 3323 if (resulting_limit > limit_by_num) 3324 resulting_limit = limit_by_num; 3325 3326 if (resulting_limit > limit_by_select) 3327 resulting_limit = limit_by_select; 3328 3329 mhd_assert (resulting_limit >= num_worker_daemons); 3330 d->conns.cfg.count_limit = resulting_limit; 3331 if (d->conns.cfg.per_ip_limit <= d->conns.cfg.count_limit) 3332 d->conns.cfg.per_ip_limit = 0; /* Already enforced by global limit */ 3333 3334 return MHD_SC_OK; 3335 } 3336 3337 3338 /** 3339 * Set correct daemon threading type. 3340 * Set the number of workers for thread pool type. 3341 * @param d the daemon object 3342 * @return #MHD_SC_OK on success, 3343 * the error code otherwise 3344 */ 3345 MHD_FN_PAR_NONNULL_ (1) \ 3346 MHD_FN_MUST_CHECK_RESULT_ static inline enum MHD_StatusCode 3347 set_d_threading_type (struct MHD_Daemon *restrict d) 3348 { 3349 switch (d->wmode_int) 3350 { 3351 case mhd_WM_INT_EXTERNAL_EVENTS_EDGE: 3352 case mhd_WM_INT_EXTERNAL_EVENTS_LEVEL: 3353 mhd_assert (! mhd_WM_INT_HAS_THREADS (d->wmode_int)); 3354 mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type); 3355 mhd_assert (NULL != d->events.data.extr.cb_data.cb); 3356 #ifdef MHD_SUPPORT_THREADS 3357 d->threading.d_type = mhd_DAEMON_TYPE_SINGLE; 3358 #endif /* MHD_SUPPORT_THREADS */ 3359 return MHD_SC_OK; 3360 case mhd_WM_INT_INTERNAL_EVENTS_NO_THREADS: 3361 mhd_assert (! 
mhd_WM_INT_HAS_THREADS (d->wmode_int));
    mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type);
#ifdef MHD_SUPPORT_THREADS
    d->threading.d_type = mhd_DAEMON_TYPE_SINGLE;
#endif /* MHD_SUPPORT_THREADS */
    return MHD_SC_OK;
#ifdef MHD_SUPPORT_THREADS
  case mhd_WM_INT_INTERNAL_EVENTS_ONE_THREAD:
    mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
    mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type);
    d->threading.d_type = mhd_DAEMON_TYPE_SINGLE;
    return MHD_SC_OK;
  case mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION:
    mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
    mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type);
    mhd_assert (! mhd_POLL_TYPE_INT_IS_EPOLL (d->events.poll_type));
    mhd_assert (! mhd_POLL_TYPE_INT_IS_KQUEUE (d->events.poll_type));
    d->threading.d_type = mhd_DAEMON_TYPE_LISTEN_ONLY;
    return MHD_SC_OK;
  case mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL:
    mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
    mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type);
    d->threading.d_type = mhd_DAEMON_TYPE_MASTER_CONTROL_ONLY;
    return MHD_SC_OK;
#endif /* MHD_SUPPORT_THREADS */
  default:
    break;
  }
  /* All valid work modes are handled above */
  mhd_UNREACHABLE ();
  return MHD_SC_INTERNAL_ERROR;
}


#ifdef MHD_SUPPORT_THREADS

/**
 * De-initialise workers pool, including workers daemons.
 * The threads must be not running.
 * @param d the daemon object
 * @param num_workers the number of workers to deinit
 */
static MHD_FN_PAR_NONNULL_ (1) void
deinit_workers_pool (struct MHD_Daemon *restrict d,
                     unsigned int num_workers)
{
  unsigned int i;
  mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (NULL != d->threading.hier.pool.workers);
  mhd_assert ((2 <= d->threading.hier.pool.num) || \
              (mhd_DAEMON_STATE_STARTING == d->state));
  mhd_assert ((num_workers == d->threading.hier.pool.num) || \
              (mhd_DAEMON_STATE_STARTING == d->state));
  mhd_assert ((mhd_DAEMON_STATE_STOPPING == d->state) || \
              (mhd_DAEMON_STATE_STARTING == d->state));

  /* Deinitialise in reverse order */
  /* 'i' is unsigned: decrementing past zero wraps to a huge value,
     which terminates the "num_workers > i" condition */
  for (i = num_workers - 1; num_workers > i; --i)
  { /* Note: loop exits after underflow of 'i' */
    struct MHD_Daemon *const worker = d->threading.hier.pool.workers + i;
    deinit_worker (worker);
  }
  free (d->threading.hier.pool.workers);
#ifndef NDEBUG
  d->dbg.thread_pool_inited = false;
#endif
}


/**
 * Nullify worker daemon member that copied from master daemon but must not
 * be used in worker
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
reset_master_only_areas (struct MHD_Daemon *restrict d)
{
#ifdef MHD_SUPPORT_EPOLL
  /* In release builds this is done mostly for safety as @a early_fd is used
     in workers only for asserts */
  if (mhd_POLL_TYPE_EPOLL == d->events.poll_type)
    d->events.data.epoll.early_fd = MHD_INVALID_SOCKET;
#endif /* MHD_SUPPORT_EPOLL */

#ifdef MHD_SUPPORT_AUTH_DIGEST
  /* Poison the copied lock so any accidental use in a worker is detectable */
  memset (&(d->auth_dg.nonces_lock),
          0x7F,
          sizeof(d->auth_dg.nonces_lock));
#endif
  /* Not needed.
     It is initialised later */
  /* memset (&(d->req_cfg.large_buf), 0, sizeof(d->req_cfg.large_buf)); */
  (void) d; /* Mute compiler warning when all branches are compiled out */
}


/**
 * Initialise workers pool, including workers daemons.
 * Do not start the threads.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
init_workers_pool (struct MHD_Daemon *restrict d,
                   struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode res;
  size_t workers_pool_size;
  unsigned int conn_per_daemon;
  unsigned int num_workers;
  unsigned int conn_remainder;
  unsigned int i;

  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL == d->wmode_int);
  mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (mhd_POLL_TYPE_NOT_SET_YET < d->events.poll_type);
  mhd_assert (1 < s->work_mode.params.num_worker_threads);
  mhd_assert (0 != d->conns.cfg.count_limit);
  mhd_assert (s->work_mode.params.num_worker_threads <= \
              d->conns.cfg.count_limit);
  mhd_assert (! d->dbg.thread_pool_inited);

  num_workers = s->work_mode.params.num_worker_threads;
  workers_pool_size =
    (sizeof(struct MHD_Daemon) * num_workers);
  /* Detect multiplication overflow before allocating */
  if (workers_pool_size / num_workers != sizeof(struct MHD_Daemon))
  { /* Overflow */
    mhd_LOG_MSG ( \
      d, MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE, \
      "The size of the thread pool is too large.");
    return MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE;
  }

#ifndef NDEBUG
  mhd_itc_set_invalid (&(d->threading.itc));
  mhd_thread_handle_ID_set_invalid (&(d->threading.tid));
#endif

  d->threading.hier.pool.workers = (struct MHD_Daemon *)
                                   malloc (workers_pool_size);
  if (NULL == d->threading.hier.pool.workers)
  {
    mhd_LOG_MSG ( \
      d, MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE, \
      "Failed to allocate memory for the thread pool.");
    return MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE;
  }

  /* Distribute the total connection limit evenly across workers;
     the first 'conn_remainder' workers get one extra slot */
  conn_per_daemon = d->conns.cfg.count_limit / num_workers;
  conn_remainder = d->conns.cfg.count_limit % num_workers;
  res = MHD_SC_OK;
  for (i = 0; num_workers > i; ++i)
  {
    struct MHD_Daemon *restrict const worker =
      d->threading.hier.pool.workers + i;
    /* Each worker starts as a byte-copy of the master daemon, then the
       master-only members are reset */
    memcpy (worker, d, sizeof(struct MHD_Daemon));
    reset_master_only_areas (worker);

    worker->threading.d_type = mhd_DAEMON_TYPE_WORKER;
    worker->threading.hier.master = d;
    worker->conns.cfg.count_limit = conn_per_daemon;
    if (conn_remainder > i)
      worker->conns.cfg.count_limit++; /* Distribute the remainder */
#ifdef MHD_SUPPORT_EPOLL
    mhd_assert ((mhd_POLL_TYPE_EPOLL != worker->events.poll_type) ||
                (0 != i) ||
                (0 < d->events.data.epoll.early_fd));
    mhd_assert ((mhd_POLL_TYPE_EPOLL != worker->events.poll_type) ||
                (0 == i) ||
                (MHD_INVALID_SOCKET == d->events.data.epoll.early_fd));
#endif /* MHD_SUPPORT_EPOLL */
    res = init_worker (worker,
                       s);
    if (MHD_SC_OK != res)
      break;
  }
  if (num_workers == i)
  {
    mhd_assert (MHD_SC_OK == res);
#ifndef NDEBUG
    d->dbg.thread_pool_inited = true;
    d->dbg.threading_inited = true;
#endif
    d->threading.hier.pool.num = num_workers;
    return MHD_SC_OK;
  }

  /* Below is a clean-up */

  mhd_assert (MHD_SC_OK != res);
  deinit_workers_pool (d, i); /* Only 'i' workers were initialised */
  return res;
}


/**
 * Initialise data specific only for the master daemon.
 * @param d the daemon object
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
init_master_only_data (struct MHD_Daemon *restrict d)
{
  enum MHD_StatusCode res;

  mhd_assert (mhd_D_HAS_WORKERS (d));
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.master_only_inited);
  mhd_assert (! d->dbg.worker_only_inited);

  /* The counter used to select the next worker for externally added
     connections (round-robin index) */
  if (! mhd_atomic_counter_init ( \
        &(d->events.act_req.ext_added.master.next_d_idx),
        0))
  {
    mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \
                 "Failed to initialise atomic counter for externally added "
                 "connections");
    res = MHD_SC_MUTEX_INIT_FAILURE;
  }
  else
    res = MHD_SC_OK;

#ifndef NDEBUG
  if (MHD_SC_OK == res)
    d->dbg.master_only_inited = true;
#endif /* ! NDEBUG */

  return res;
}


/**
 * De-initialise data specific only for the master daemon.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deinit_master_only_data (struct MHD_Daemon *restrict d)
{
  mhd_assert (mhd_D_HAS_WORKERS (d));
  mhd_assert (d->dbg.master_only_inited);
  mhd_assert (! d->dbg.worker_only_inited);

  mhd_atomic_counter_deinit (&(d->events.act_req.ext_added.master.next_d_idx));

#ifndef NDEBUG
  d->dbg.master_only_inited = false;
#endif /* !
NDEBUG */
}


/**
 * Initialise individual events, connection data for the "master" daemon,
 * including master-only data, the workers pool, and the workers daemons,
 * including individual worker-specific threading and other data.
 * Do not start the threads.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
init_master (struct MHD_Daemon *restrict d,
             struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode res;
  mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));

  res = init_master_only_data (d);
  if (MHD_SC_OK != res)
    return res;

  res = init_workers_pool (d,
                           s);
  if (MHD_SC_OK == res)
  {
    /* Copy some settings to the master daemon */
    d->conns.cfg.mem_pool_size =
      d->threading.hier.pool.workers[0].conns.cfg.mem_pool_size;

    return res;
  }

  /* Below is a clean-up path */

  deinit_master_only_data (d);

  mhd_assert (MHD_SC_OK != res);
  return res;
}


/**
 * De-initialise individual events, connection data for the "master" daemon,
 * including master-only data, the workers pool, and the workers daemons,
 * including individual worker-specific threading and other data.
 * The threads must be not running.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deinit_master (struct MHD_Daemon *restrict d)
{
  /* Reverse order of init_master() */
  deinit_workers_pool (d,
                       d->threading.hier.pool.num);

  deinit_master_only_data (d);
}


#endif /* MHD_SUPPORT_THREADS */

/**
 * Initialise threading and inter-thread communications.
 * Also finish initialisation of events processing and initialise daemon's
 * connection data.
 * Do not start the thread even if configured for the internal threads.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_init_threading_and_conn (struct MHD_Daemon *restrict d,
                                struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode res;

  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type);

  res = set_d_threading_type (d);
  if (MHD_SC_OK != res)
    return res;

  res = set_connections_total_limits (d, s);
  if (MHD_SC_OK != res)
    return res;

#ifdef MHD_SUPPORT_THREADS
  d->threading.cfg.stack_size = s->stack_size;
#endif /* MHD_SUPPORT_THREADS */

  /* A daemon without workers is itself a worker; otherwise it is
     the master of a thread pool */
  if (! mhd_D_HAS_WORKERS (d))
    res = init_worker (d,
                       s);
#ifdef MHD_SUPPORT_THREADS
  else
    res = init_master (d,
                       s);
#endif /* MHD_SUPPORT_THREADS */

  if (MHD_SC_OK == res)
  {
    /* Cross-check the debug flags against the resulting daemon type */
    mhd_assert (d->dbg.events_allocated || \
                mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
    mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type) || \
                ! d->dbg.events_allocated);
    mhd_assert (! d->dbg.thread_pool_inited || \
                mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
    mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type) || \
                d->dbg.thread_pool_inited);
    mhd_assert (! mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type));
    mhd_assert (! d->dbg.events_allocated || d->dbg.connections_inited);
    mhd_assert (! d->dbg.connections_inited || d->dbg.events_allocated);
  }
  return res;
}


/**
 * De-initialise threading and inter-thread communications.
 * Also deallocate events and de-initialise daemon's connection data.
 * No daemon-managed threads should be running.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
daemon_deinit_threading_and_conn (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (d->dbg.threading_inited);
  mhd_assert (! mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type));
  if (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type))
  {
    /* Single-threaded or single worker daemon */
    mhd_assert (! mhd_WM_INT_IS_THREAD_POOL (d->wmode_int));
    mhd_assert (d->dbg.connections_inited);
    mhd_assert (d->dbg.events_allocated);
    mhd_assert (! d->dbg.thread_pool_inited);
    deinit_worker (d);
  }
  else
  {
#ifdef MHD_SUPPORT_THREADS
    /* Master daemon of a thread pool */
    mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL == d->wmode_int);
    mhd_assert (! d->dbg.connections_inited);
    mhd_assert (! d->dbg.events_allocated);
    mhd_assert (d->dbg.thread_pool_inited);
    deinit_master (d);
#else  /* ! MHD_SUPPORT_THREADS */
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    (void) 0;
#endif /* ! MHD_SUPPORT_THREADS */
  }
}


#ifdef MHD_SUPPORT_THREADS

/**
 * Start the daemon individual single thread.
 * Works both for single thread daemons and for worker daemon for thread
 * pool mode.
 * Must be called only for daemons with internal threads.
 * @param d the daemon object, must be completely initialised
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
start_individual_daemon_thread (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.threading_inited);
  mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
  mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (! mhd_thread_handle_ID_is_valid_handle (d->threading.tid));

  /* Pick the thread routine and the thread name by the daemon type */
  if (mhd_DAEMON_TYPE_SINGLE == d->threading.d_type)
  {
    if (! mhd_create_named_thread ( \
          &(d->threading.tid), "MHD-single", \
          d->threading.cfg.stack_size, \
          &mhd_worker_all_events, \
          (void*) d))
    {
      mhd_LOG_MSG (d, MHD_SC_THREAD_MAIN_LAUNCH_FAILURE, \
                   "Failed to start daemon main thread.");
      return MHD_SC_THREAD_MAIN_LAUNCH_FAILURE;
    }
  }
  else if (mhd_DAEMON_TYPE_WORKER == d->threading.d_type)
  {
    if (! mhd_create_named_thread ( \
          &(d->threading.tid), "MHD-worker", \
          d->threading.cfg.stack_size, \
          &mhd_worker_all_events, \
          (void*) d))
    {
      mhd_LOG_MSG (d, MHD_SC_THREAD_WORKER_LAUNCH_FAILURE, \
                   "Failed to start daemon worker thread.");
      return MHD_SC_THREAD_WORKER_LAUNCH_FAILURE;
    }
  }
  else if (mhd_DAEMON_TYPE_LISTEN_ONLY == d->threading.d_type)
  {
    if (! mhd_create_named_thread ( \
          &(d->threading.tid), "MHD-listen", \
          d->threading.cfg.stack_size, \
          &mhd_worker_listening_only, \
          (void*) d))
    {
      mhd_LOG_MSG (d, MHD_SC_THREAD_LISTENING_LAUNCH_FAILURE, \
                   "Failed to start daemon listening thread.");
      return MHD_SC_THREAD_LISTENING_LAUNCH_FAILURE;
    }
  }
  else
  {
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    return MHD_SC_INTERNAL_ERROR;
  }
  mhd_assert (mhd_thread_handle_ID_is_valid_handle (d->threading.tid));
  return MHD_SC_OK;
}


/**
 * Stop the daemon individual single thread.
 * Works both for single thread daemons and for worker daemon for thread
 * pool mode.
 * Must be called only for daemons with internal threads.
 * @param d the daemon object, must be completely initialised
 */
MHD_FN_PAR_NONNULL_ (1) static void
stop_individual_daemon_thread (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.threading_inited);
  mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
  mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert ((mhd_DAEMON_STATE_STOPPING == d->state) || \
              (mhd_DAEMON_STATE_STARTING == d->state));
  mhd_assert (mhd_thread_handle_ID_is_valid_handle (d->threading.tid));

  /* Signal the thread to stop, wake it via ITC, then join it */
  d->threading.stop_requested = true;

  mhd_daemon_trigger_itc (d);
  if (! mhd_thread_handle_ID_join_thread (d->threading.tid))
  {
    mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_ERROR, \
                 "Failed to stop daemon main thread.");
  }
}


/**
 * Stop all worker threads in the thread pool.
 * Must be called only for master daemons with thread pool.
 * @param d the daemon object, the workers threads must be running
 * @param num_workers the number of threads to stop
 */
static MHD_FN_PAR_NONNULL_ (1) void
stop_worker_pool_threads (struct MHD_Daemon *restrict d,
                          unsigned int num_workers)
{
  unsigned int i;
  mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (NULL != d->threading.hier.pool.workers);
  mhd_assert (0 != d->threading.hier.pool.num);
  mhd_assert (d->dbg.thread_pool_inited);
  mhd_assert (2 <= d->threading.hier.pool.num);
  mhd_assert ((num_workers == d->threading.hier.pool.num) || \
              (mhd_DAEMON_STATE_STARTING == d->state));
  mhd_assert ((mhd_DAEMON_STATE_STOPPING == d->state) || \
              (mhd_DAEMON_STATE_STARTING == d->state));

  /* Process all the threads in the reverse order */

  /* Trigger all threads */
  /* First pass: request stop and wake every worker, so all of them shut
     down in parallel before any join is attempted */
  for (i = num_workers - 1; num_workers > i; --i)
  { /* Note: loop exits after underflow of 'i' */
    d->threading.hier.pool.workers[i].threading.stop_requested = true;
    mhd_assert (mhd_ITC_IS_VALID ( \
      d->threading.hier.pool.workers[i].threading.itc));
    mhd_daemon_trigger_itc (d->threading.hier.pool.workers + i);
  }

  /* Collect all threads */
  /* Second pass: join every worker thread */
  for (i = num_workers - 1; num_workers > i; --i)
  { /* Note: loop exits after underflow of 'i' */
    struct MHD_Daemon *const restrict worker =
      d->threading.hier.pool.workers + i;
    mhd_assert (mhd_thread_handle_ID_is_valid_handle (worker->threading.tid));
    if (! mhd_thread_handle_ID_join_thread (worker->threading.tid))
    {
      mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_ERROR, \
                   "Failed to stop a worker thread.");
    }
  }
}


/**
 * Start the workers pool threads.
 * Must be called only for master daemons with thread pool.
 * @param d the daemon object, must be completely initialised
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
start_worker_pool_threads (struct MHD_Daemon *restrict d)
{
  enum MHD_StatusCode res;
  unsigned int i;

  mhd_assert (d->dbg.threading_inited);
  mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
  mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (d->dbg.thread_pool_inited);
  mhd_assert (2 <= d->threading.hier.pool.num);

  res = MHD_SC_OK;

  for (i = 0; d->threading.hier.pool.num > i; ++i)
  {
    res = start_individual_daemon_thread (d->threading.hier.pool.workers + i);
    if (MHD_SC_OK != res)
      break;
  }
  if (d->threading.hier.pool.num == i)
  {
    mhd_assert (MHD_SC_OK == res);
    return MHD_SC_OK;
  }

  /* One thread failed to start: stop the 'i' threads already running */
  stop_worker_pool_threads (d, i);
  mhd_assert (MHD_SC_OK != res);
  return res;
}


#endif /* MHD_SUPPORT_THREADS */

/**
 * Start the daemon internal threads, if the daemon configured to use them.
 * @param d the daemon object, must be completely initialised
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_start_threads (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (d->dbg.threading_inited);
  mhd_assert (! mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type));
  if (mhd_WM_INT_HAS_THREADS (d->wmode_int))
  {
#ifdef MHD_SUPPORT_THREADS
    if (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL != d->wmode_int)
    {
      /* Single internal thread */
      mhd_assert (d->dbg.threading_inited);
      mhd_assert (mhd_DAEMON_TYPE_MASTER_CONTROL_ONLY != d->threading.d_type);
      return start_individual_daemon_thread (d);
    }
    else
    {
      /* Thread pool */
      mhd_assert (d->dbg.thread_pool_inited);
      mhd_assert (mhd_DAEMON_TYPE_MASTER_CONTROL_ONLY == d->threading.d_type);
      return start_worker_pool_threads (d);
    }
#else  /* ! MHD_SUPPORT_THREADS */
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    return MHD_SC_INTERNAL_ERROR;
#endif /* ! MHD_SUPPORT_THREADS */
  }
  /* External-events / no-threads modes: nothing to start */
  return MHD_SC_OK;
}


/**
 * Stop the daemon internal threads, if the daemon configured to use them.
 * @param d the daemon object to stop threads
 */
static MHD_FN_PAR_NONNULL_ (1) void
daemon_stop_threads (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (d->dbg.threading_inited);
  if (mhd_WM_INT_HAS_THREADS (d->wmode_int))
  {
#ifdef MHD_SUPPORT_THREADS
    if (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL != d->wmode_int)
    {
      mhd_assert (d->dbg.threading_inited);
      mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
      stop_individual_daemon_thread (d);
      return;
    }
    else
    {
      mhd_assert (d->dbg.thread_pool_inited);
      mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
      stop_worker_pool_threads (d, d->threading.hier.pool.num);
      return;
    }
#else  /* ! MHD_SUPPORT_THREADS */
    mhd_UNREACHABLE ();
    return;
#endif /* !
MHD_SUPPORT_THREADS */ 4035 } 4036 } 4037 4038 4039 /** 4040 * Close all daemon connections for modes without internal threads 4041 * @param d the daemon object 4042 */ 4043 static MHD_FN_PAR_NONNULL_ (1) void 4044 daemon_close_connections (struct MHD_Daemon *restrict d) 4045 { 4046 if (mhd_WM_INT_HAS_THREADS (d->wmode_int)) 4047 { 4048 /* In these modes connections must be closed in the daemon thread */ 4049 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn)); 4050 return; 4051 } 4052 4053 mhd_daemon_close_all_conns (d); 4054 } 4055 4056 4057 /** 4058 * Internal daemon initialisation function. 4059 * This function calls all required initialisation stages one-by-one. 4060 * @param d the daemon object 4061 * @param s the user settings 4062 * @return #MHD_SC_OK on success, 4063 * the error code otherwise 4064 */ 4065 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 4066 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 4067 daemon_start_internal (struct MHD_Daemon *restrict d, 4068 struct DaemonOptions *restrict s) 4069 { 4070 enum MHD_StatusCode res; 4071 4072 res = daemon_set_basic_settings (d, s); 4073 if (MHD_SC_OK != res) 4074 return res; 4075 4076 res = daemon_set_work_mode (d, s); 4077 if (MHD_SC_OK != res) 4078 return res; 4079 4080 res = daemon_init_net (d, s); 4081 if (MHD_SC_OK != res) 4082 return res; 4083 4084 mhd_assert (d->dbg.net_inited); 4085 4086 res = daemon_init_auth_digest (d, s); 4087 4088 if (MHD_SC_OK == res) 4089 { 4090 res = daemon_init_tls (d, s); 4091 if (MHD_SC_OK == res) 4092 { 4093 mhd_assert (d->dbg.tls_inited); 4094 res = daemon_init_threading_and_conn (d, s); 4095 if (MHD_SC_OK == res) 4096 { 4097 mhd_assert (d->dbg.threading_inited); 4098 mhd_assert (! 
mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type)); 4099 4100 res = daemon_init_large_buf (d, s); 4101 if (MHD_SC_OK == res) 4102 { 4103 res = daemon_start_threads (d); 4104 if (MHD_SC_OK == res) 4105 { 4106 return MHD_SC_OK; 4107 } 4108 4109 /* Below is a clean-up path */ 4110 daemon_deinit_large_buf (d); 4111 } 4112 daemon_deinit_threading_and_conn (d); 4113 } 4114 daemon_deinit_tls (d); 4115 } 4116 daemon_deinit_auth_digest (d); 4117 } 4118 daemon_deinit_net (d); 4119 mhd_assert (MHD_SC_OK != res); 4120 return res; 4121 } 4122 4123 4124 MHD_EXTERN_ 4125 MHD_FN_PAR_NONNULL_ (1) MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 4126 MHD_daemon_start (struct MHD_Daemon *daemon) 4127 { 4128 struct MHD_Daemon *const d = daemon; /* a short alias */ 4129 struct DaemonOptions *const s = daemon->settings; /* a short alias */ 4130 enum MHD_StatusCode res; 4131 4132 if (mhd_DAEMON_STATE_NOT_STARTED != daemon->state) 4133 return MHD_SC_TOO_LATE; 4134 4135 mhd_assert (NULL != s); 4136 4137 d->state = mhd_DAEMON_STATE_STARTING; 4138 res = daemon_start_internal (d, s); 4139 4140 d->settings = NULL; 4141 dsettings_release (s); 4142 4143 d->state = 4144 (MHD_SC_OK == res) ? mhd_DAEMON_STATE_STARTED : mhd_DAEMON_STATE_FAILED; 4145 4146 return res; 4147 } 4148 4149 4150 MHD_EXTERN_ MHD_FN_PAR_NONNULL_ALL_ void 4151 MHD_daemon_destroy (struct MHD_Daemon *daemon) 4152 { 4153 bool not_yet_started = (mhd_DAEMON_STATE_NOT_STARTED == daemon->state); 4154 bool has_failed = (mhd_DAEMON_STATE_FAILED == daemon->state); 4155 mhd_assert (mhd_DAEMON_STATE_STOPPING > daemon->state); 4156 mhd_assert (mhd_DAEMON_STATE_STARTING != daemon->state); 4157 4158 daemon->state = mhd_DAEMON_STATE_STOPPING; 4159 if (not_yet_started) 4160 { 4161 mhd_assert (NULL != daemon->settings); 4162 dsettings_release (daemon->settings); 4163 } 4164 else if (! 
has_failed) 4165 { 4166 mhd_assert (NULL == daemon->settings); 4167 mhd_assert (daemon->dbg.threading_inited); 4168 4169 daemon_stop_threads (daemon); 4170 4171 daemon_close_connections (daemon); 4172 4173 daemon_deinit_threading_and_conn (daemon); 4174 4175 daemon_deinit_large_buf (daemon); 4176 4177 daemon_deinit_tls (daemon); 4178 4179 daemon_deinit_auth_digest (daemon); 4180 4181 daemon_deinit_net (daemon); 4182 } 4183 daemon->state = mhd_DAEMON_STATE_STOPPED; /* Useful only for debugging */ 4184 4185 free (daemon); 4186 4187 mhd_lib_deinit_global_if_needed (); 4188 }