libmicrohttpd2

HTTP server C library (MHD 2.x, alpha)
Log | Files | Refs | README | LICENSE

events_process.c (87115B)


      1 /* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
      2 /*
      3   This file is part of GNU libmicrohttpd.
      4   Copyright (C) 2024-2026 Evgeny Grin (Karlson2k)
      5 
      6   GNU libmicrohttpd is free software; you can redistribute it and/or
      7   modify it under the terms of the GNU Lesser General Public
      8   License as published by the Free Software Foundation; either
      9   version 2.1 of the License, or (at your option) any later version.
     10 
     11   GNU libmicrohttpd is distributed in the hope that it will be useful,
     12   but WITHOUT ANY WARRANTY; without even the implied warranty of
     13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14   Lesser General Public License for more details.
     15 
     16   Alternatively, you can redistribute GNU libmicrohttpd and/or
     17   modify it under the terms of the GNU General Public License as
     18   published by the Free Software Foundation; either version 2 of
     19   the License, or (at your option) any later version, together
     20   with the eCos exception, as follows:
     21 
     22     As a special exception, if other files instantiate templates or
     23     use macros or inline functions from this file, or you compile this
     24     file and link it with other works to produce a work based on this
     25     file, this file does not by itself cause the resulting work to be
     26     covered by the GNU General Public License. However the source code
     27     for this file must still be made available in accordance with
     28     section (3) of the GNU General Public License v2.
     29 
     30     This exception does not invalidate any other reasons why a work
     31     based on this file might be covered by the GNU General Public
     32     License.
     33 
     34   You should have received copies of the GNU Lesser General Public
     35   License and the GNU General Public License along with this library;
     36   if not, see <https://www.gnu.org/licenses/>.
     37 */
     38 
     39 /**
     40  * @file src/mhd2/events_process.c
     41  * @brief  The implementation of events processing functions
     42  * @author Karlson2k (Evgeny Grin)
     43  */
     44 
     45 #include "mhd_sys_options.h"
     46 #include "events_process.h"
     47 
     48 #include "mhd_assert.h"
     49 #include "mhd_unreachable.h"
     50 
     51 #include "mhd_predict.h"
     52 
     53 #if defined(MHD_USE_TRACE_SUSPEND_RESUME) || defined(MHD_USE_TRACE_POLLING_FDS)
     54 #  include <stdio.h>
     55 #  include <string.h>
     56 #endif /* MHD_USE_TRACE_SUSPEND_RESUME || MHD_USE_TRACE_POLLING_FDS */
     57 
     58 #include "mhd_locks.h"
     59 
     60 #include "mhd_socket_type.h"
     61 #include "sys_poll.h"
     62 #include "sys_select.h"
     63 #ifdef MHD_SUPPORT_EPOLL
     64 #  include <sys/epoll.h>
     65 #endif
     66 #include "sys_kqueue.h"
     67 #ifdef MHD_SOCKETS_KIND_POSIX
     68 #  include "sys_errno.h"
     69 #endif
     70 
     71 #include "mhd_itc.h"
     72 
     73 #include "mhd_panic.h"
     74 #include "mhd_dbg_print.h"
     75 
     76 #include "mhd_sockets_macros.h"
     77 #include "mhd_socket_error_funcs.h"
     78 
     79 #include "mhd_daemon.h"
     80 #include "mhd_connection.h"
     81 
     82 #include "mhd_mono_clock.h"
     83 
     84 #include "conn_timeout.h"
     85 #include "conn_mark_ready.h"
     86 #include "daemon_logger.h"
     87 #include "daemon_add_conn.h"
     88 #include "daemon_funcs.h"
     89 #include "conn_data_process.h"
     90 #include "stream_funcs.h"
     91 #include "extr_events_funcs.h"
     92 
     93 #ifdef MHD_SUPPORT_UPGRADE
     94 #  include "upgrade_proc.h"
     95 #endif /* MHD_SUPPORT_UPGRADE */
     96 
     97 #ifdef MHD_SUPPORT_HTTPS
     98 #  include "mhd_tls_funcs.h"
     99 #endif
    100 
    101 #ifdef MHD_SUPPORT_HTTP2
    102 #  include "h2/h2_comm.h"
    103 #endif
    104 
    105 #include "mhd_public_api.h"
    106 
    107 #ifdef MHD_USE_TRACE_POLLING_FDS
    108 /**
    109  * Debug-printf request of FD polling/monitoring
    110  * @param fd_name the name of FD ("ITC", "lstn" or "conn")
    111  * @param fd the FD value
    112  * @param r_ready the request for read (or receive) readiness
    113  * @param w_ready the request for write (or send) readiness
    114  * @param e_ready the request for exception (or error) readiness
    115  */
    116 MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void
    117 mhd_dbg_print_fd_mon_req (const char *fd_name,
    118                           MHD_Socket fd,
    119                           bool r_ready,
    120                           bool w_ready,
    121                           bool e_ready)
    122 {
    123   char state_str[] = "x:x:x";
    124   state_str[0] = r_ready ? 'R' : '-';
    125   state_str[2] = w_ready ? 'W' : '-';
    126   state_str[4] = e_ready ? 'E' : '-';
    127 
    128   fprintf (stderr,
    129            "### Set FD watching: %4s [%2llu] for %s\n",
    130            fd_name,
    131            (unsigned long long) fd,
    132            state_str);
    133 }
    134 
    135 
    136 /**
    137  * Debug-printf reported (by polling) status of FD
    138  * @param fd_name the name of FD ("ITC", "lstn" or "conn")
    139  * @param fd the FD value
    140  * @param r_ready the read (or receive) readiness
    141  * @param w_ready the write (or send) readiness
    142  * @param e_ready the exception (or error) readiness
    143  */
    144 static MHD_FN_PAR_NONNULL_ALL_ void
    145 dbg_print_fd_state_update (const char *fd_name,
    146                            MHD_Socket fd,
    147                            bool r_ready,
    148                            bool w_ready,
    149                            bool e_ready)
    150 {
    151   char state_str[] = "x:x:x";
    152   state_str[0] = r_ready ? 'R' : '-';
    153   state_str[2] = w_ready ? 'W' : '-';
    154   state_str[4] = e_ready ? 'E' : '-';
    155 
    156   fprintf (stderr,
    157            "### FD state update: %4s [%2llu]  -> %s\n",
    158            fd_name,
    159            (unsigned long long) fd,
    160            state_str);
    161 }
    162 
    163 
    164 #  ifdef MHD_SUPPORT_KQUEUE
    165 
    166 static const char *
    167 mhd_dbg_kefilter_to_name (const struct kevent *ke)
    168 {
    169   switch (ke->filter)
    170   {
    171   case EVFILT_READ:
    172     return "READ ";
    173   case EVFILT_WRITE:
    174     return "WRITE";
    175   default:
    176     break;
    177   }
    178   return "OTHER";
    179 }
    180 
    181 
    182 #define mhd_DBG_KEFLAGS_BUF_SIZE    512
    183 
    184 static void
    185 mdd_dbg_keflags_to_text (const struct kevent *ke,
    186                          char buf[mhd_DBG_KEFLAGS_BUF_SIZE])
    187 {
    188   static const size_t buf_size = mhd_DBG_KEFLAGS_BUF_SIZE;
    189   size_t len = 0u;
    190   const unsigned int keflags = ke->flags;
    191   unsigned int extra_flags;
    192   buf[0] = '\0';
    193 
    194   if (0 != (EV_ADD & keflags))
    195     strcat (buf, "ADD|");
    196   if (0 != (EV_ENABLE & keflags))
    197     strcat (buf, "ENABLE|");
    198   if (0 != (EV_DISABLE & keflags))
    199     strcat (buf, "DISABLE|");
    200   if (0 != (EV_DISPATCH & keflags))
    201     strcat (buf, "DISPATCH|");
    202   if (0 != (EV_DELETE & keflags))
    203     strcat (buf, "DELETE|");
    204   if (0 != (EV_RECEIPT & keflags))
    205     strcat (buf, "RECEIPT|");
    206   if (0 != (EV_ONESHOT & keflags))
    207     strcat (buf, "ONESHOT|");
    208   if (0 != (EV_CLEAR & keflags))
    209     strcat (buf, "CLEAR|");
    210   if (0 != (EV_EOF & keflags))
    211     strcat (buf, "EOF|");
    212   if (0 != (EV_ERROR & keflags))
    213     strcat (buf, "ERROR|");
    214 #ifdef EV_KEEPUDATA
    215   if (0 != (EV_KEEPUDATA & keflags))
    216     strcat (buf, "KEEPUDATA|");
    217 #endif /* EV_KEEPUDATA */
    218 
    219   len = strlen (buf);
    220   mhd_assert (buf_size > len);
    221 
    222   extra_flags =
    223     (~((unsigned int) (EV_ADD | EV_ENABLE | EV_DISABLE | EV_DISPATCH | EV_DELETE
    224                        | EV_RECEIPT | EV_ONESHOT | EV_CLEAR | EV_EOF | EV_ERROR
    225                        | mhd_EV_KEEPUDATA_OR_ZERO))) & keflags;
    226 
    227   if (0u != extra_flags)
    228   {
    229     (void) snprintf (buf + len,
    230                      buf_size - len,
    231                      "0x%02X|",
    232                      extra_flags);
    233     len = strlen (buf);
    234     mhd_assert (buf_size > len);
    235   }
    236 
    237   if (0u == len)
    238     strcpy (buf, "0");
    239   else
    240     buf[len - 1u] = '\0'; /* Erase last '|' */
    241 }
    242 
    243 
    244 MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void
    245 mhd_dbg_print_kevent (const char *fd_name,
    246                       const struct kevent *ke,
    247                       bool update_req)
    248 {
    249   char flags_txt[mhd_DBG_KEFLAGS_BUF_SIZE];
    250   const char *action_name =
    251     update_req ? "Update FD watching" : "FD state update";
    252 
    253   mdd_dbg_keflags_to_text (ke,
    254                            flags_txt);
    255 
    256   fprintf (stderr,
    257            "### %s: %4s [%2llu]; filter: %s; flags: %s;\t"
    258            "fflags: %u;\tdata %lld\n",
    259            action_name,
    260            fd_name,
    261            (unsigned long long) ke->ident,
    262            mhd_dbg_kefilter_to_name (ke),
    263            flags_txt,
    264            (unsigned int) ke->fflags,
    265            (long long) ke->data);
    266 }
    267 
    268 
    269 #  endif /* MHD_SUPPORT_KQUEUE */
    270 
#else  /* ! MHD_USE_TRACE_POLLING_FDS */
/* FD tracing disabled: reduce the debug-print helpers to no-op macros */
#  define dbg_print_fd_state_update(fd_n,fd,r_ready,w_ready,e_ready) \
        ((void) 0)
#  ifdef MHD_SUPPORT_KQUEUE
/* Expands to nothing; valid only in full-statement position */
#    define mhd_dbg_print_kq_fd_mon_req(fd_name,ke)
#  endif /* MHD_SUPPORT_KQUEUE */
#endif /* ! MHD_USE_TRACE_POLLING_FDS */
    278 
    279 #ifdef MHD_SUPPORT_THREADS
    280 /**
    281  * Log error message about broken ITC
    282  * @param d the daemon to use
    283  */
    284 static MHD_FN_PAR_NONNULL_ALL_ void
    285 log_itc_broken (struct MHD_Daemon *restrict d)
    286 {
    287   mhd_LOG_MSG (d, \
    288                MHD_SC_ITC_STATUS_ERROR, \
    289                "System reported that ITC has an error status or broken.");
    290 }
    291 
    292 
    293 #endif /* MHD_SUPPORT_THREADS */
    294 
    295 /**
    296  * Log error message about broken listen socket
    297  * @param d the daemon to use
    298  */
    299 static MHD_FN_PAR_NONNULL_ALL_ void
    300 log_listen_broken (struct MHD_Daemon *restrict d)
    301 {
    302   mhd_LOG_MSG (d, MHD_SC_LISTEN_STATUS_ERROR, \
    303                "System reported that the listening socket has an error " \
    304                "status or broken. The daemon will not listen any more.");
    305 }
    306 
    307 
    308 static MHD_FN_PAR_NONNULL_ALL_ uint_fast64_t
    309 mhd_daemon_get_wait_erliest_timeout (const struct MHD_Daemon *restrict d)
    310 {
    311   uint_fast64_t ret;
    312   uint_fast64_t cur_milsec;
    313   const struct MHD_Connection *c;
    314 
    315   c = mhd_DLINKEDL_GET_LAST_D (&(d->conns.def_timeout));
    316   if ((NULL == c)
    317       && (NULL == mhd_DLINKEDL_GET_LAST_D (&(d->conns.cust_timeout))))
    318     return MHD_WAIT_INDEFINITELY;
    319 
    320   /* Do not use mhd_daemon_get_milsec_counter() as actual time is required
    321      here */
    322   cur_milsec = mhd_monotonic_msec_counter ();
    323 
    324   /* Check just the first connection in the ordered "default timeout" list */
    325   if (NULL != c)
    326     ret = mhd_conn_get_timeout_left (c,
    327                                      cur_milsec);
    328   else
    329     ret = MHD_WAIT_INDEFINITELY;
    330 
    331   for (c = mhd_DLINKEDL_GET_LAST_D (&(d->conns.cust_timeout));
    332        (NULL != c) && (0u != ret);
    333        c = mhd_DLINKEDL_GET_PREV (&(c->timeout),
    334                                   tmout_list))
    335   {
    336     uint_fast64_t conn_tmout_left;
    337     conn_tmout_left = mhd_conn_get_timeout_left (c,
    338                                                  cur_milsec);
    339     if (ret > conn_tmout_left)
    340       ret = conn_tmout_left;
    341   }
    342 
    343   return ret;
    344 }
    345 
    346 
/**
 * Get the maximum number of milliseconds the daemon may block waiting for
 * events. Returns zero when some work is already pending (so the daemon
 * must poll without blocking); otherwise returns the earliest connection
 * timeout or #MHD_WAIT_INDEFINITELY.
 * @param d the daemon to check (must not have a worker pool)
 * @return zero if processing is already pending,
 *         #MHD_WAIT_INDEFINITELY if nothing limits the wait,
 *         the milliseconds until the earliest connection timeout otherwise
 */
MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ uint_fast64_t
mhd_daemon_get_wait_max (const struct MHD_Daemon *restrict d)
{
  uint_fast64_t ret;

  mhd_assert (! mhd_D_HAS_WORKERS (d));

  /* New connection is waiting to be accept()'ed and accepting is allowed */
  if (d->events.accept_pending && ! d->conns.block_new)
  {
#ifdef MHD_USE_TRACE_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(accept new conn pending)\n");
#endif
    return 0;
  }
  /* Some connection(s) requested to be resumed */
  if (d->events.act_req.resume)
  {
#ifdef MHD_USE_TRACE_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(resume connection pending)\n");
#endif
    return 0;
  }
  /* Some connection(s) are already marked ready for processing */
  if (NULL != mhd_DLINKEDL_GET_FIRST (&(d->events), proc_ready))
  {
#ifdef MHD_USE_TRACE_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(connection(s) is already ready)\n");
#endif
    return 0;
  }
  /* Externally added connection(s) are queued for this worker */
  if (NULL != mhd_DLINKEDL_GET_FIRST (&(d->events.act_req.ext_added.worker),
                                      queue))
  {
#ifdef MHD_USE_TRACE_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(externally added connection(s) pending)\n");
#endif
    return 0;
  }
#ifdef MHD_SUPPORT_KQUEUE
  /* The newest connection is not yet registered in kqueue; the "all_conn"
     list apparently keeps the newest connection first */
  if (mhd_D_IS_USING_KQUEUE (d))
  {
    if ((NULL != mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn)) &&
        ! mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn)->events.kq.monitored)
    {
#ifdef MHD_USE_TRACE_POLLING_FDS
      fprintf (stderr,
               "### mhd_daemon_get_wait_max(daemon) -> zero "
               "(kqueue unmonitored connection(s) pending)\n");
#endif
      return 0;
    }
  }
#endif /* MHD_SUPPORT_KQUEUE */

  /* No pending work: wait up to the earliest connection timeout */
  ret = mhd_daemon_get_wait_erliest_timeout (d);

#ifdef MHD_USE_TRACE_POLLING_FDS
  if (MHD_WAIT_INDEFINITELY == ret)
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> MHD_WAIT_INDEFINITELY\n");
  else
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> %lu\n",
             (unsigned long) ret);
#endif

  return ret;
}
    421 
    422 
/**
 * Resume a single connection previously suspended by the application:
 * re-arm the activity timeout and force processing of the connection
 * in the current events round.
 * Note: the 'resuming' flag is not cleared here; presumably it is reset
 * later during connection processing — TODO confirm.
 * @param c the connection to resume (must be suspended)
 * @param d the daemon of the connection
 */
static MHD_FN_PAR_NONNULL_ALL_ void
start_resuming_connection (struct MHD_Connection *restrict c,
                           struct MHD_Daemon *restrict d)
{
  mhd_assert (c->suspended);
#ifdef MHD_USE_TRACE_SUSPEND_RESUME
  fprintf (stderr,
           "%%%%%%   Resuming connection, FD: %2llu\n",
           (unsigned long long) c->sk.fd);
#endif /* MHD_USE_TRACE_SUSPEND_RESUME */
  c->suspended = false;
  /* Restart the activity timeout counting from "now" */
  mhd_conn_init_activity_timeout (c,
                                  c->timeout.milsec);
  mhd_conn_mark_ready (c, d); /* Force processing connection in this round */
}
    438 
    439 
    440 /**
    441  * Check whether any resuming connections are pending and resume them
    442  * @param d the daemon to use
    443  */
    444 static MHD_FN_PAR_NONNULL_ALL_ void
    445 daemon_resume_conns_if_needed (struct MHD_Daemon *restrict d)
    446 {
    447   struct MHD_Connection *c;
    448 
    449   if (! d->events.act_req.resume)
    450     return;
    451 
    452   d->events.act_req.resume = false; /* Reset flag before processing data */
    453 
    454   for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
    455        NULL != c;
    456        c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
    457   {
    458     if (c->resuming)
    459       start_resuming_connection (c, d);
    460   }
    461 }
    462 
    463 
    464 #if defined (MHD_SUPPORT_POLL) || defined(MHD_SUPPORT_EPOLL)
    465 
    466 mhd_DATA_TRUNCATION_RUNTIME_CHECK_DISABLE
    467 
    468 static MHD_FN_PAR_NONNULL_ALL_ int
    469 get_max_wait (const struct MHD_Daemon *restrict d)
    470 {
    471   const uint_fast64_t ui64_wait = mhd_daemon_get_wait_max (d);
    472   int i_wait = (int) ui64_wait;
    473 
    474   if (MHD_WAIT_INDEFINITELY <= ui64_wait)
    475     return -1;
    476 
    477   if (mhd_COND_ALMOST_NEVER ((0 > i_wait) ||
    478                              (ui64_wait != (uint_fast64_t) i_wait)))
    479     return INT_MAX;
    480 
    481   return i_wait;
    482 }
    483 
    484 
    485 mhd_DATA_TRUNCATION_RUNTIME_CHECK_RESTORE
    486 /* End of warning-less data truncation */
    487 
    488 #endif
    489 /* MHD_SUPPORT_POLL || MHD_SUPPORT_EPOLL */
    490 
    491 
    492 MHD_FN_PAR_NONNULL_ (1) static void
    493 update_conn_net_status (struct MHD_Daemon *restrict d,
    494                         struct MHD_Connection *restrict c,
    495                         bool recv_ready,
    496                         bool send_ready,
    497                         bool err_state)
    498 {
    499   enum mhd_SocketNetState sk_state;
    500 
    501   mhd_assert (d == c->daemon);
    502   /* "resuming" must be not processed yet */
    503   mhd_assert (! c->resuming || c->suspended);
    504 
    505   dbg_print_fd_state_update ("conn", \
    506                              c->sk.fd, \
    507                              recv_ready, \
    508                              send_ready, \
    509                              err_state);
    510 
    511   sk_state = mhd_SOCKET_NET_STATE_NOTHING;
    512   if (recv_ready)
    513     sk_state = (enum mhd_SocketNetState)
    514                (sk_state | (unsigned int) mhd_SOCKET_NET_STATE_RECV_READY);
    515   if (send_ready)
    516     sk_state = (enum mhd_SocketNetState)
    517                (sk_state | (unsigned int) mhd_SOCKET_NET_STATE_SEND_READY);
    518   if (err_state)
    519     sk_state = (enum mhd_SocketNetState)
    520                (sk_state | (unsigned int) mhd_SOCKET_NET_STATE_ERROR_READY);
    521   c->sk.ready = sk_state;
    522 
    523   if (! c->suspended)
    524     mhd_conn_mark_ready_update3 (c, err_state, d);
    525   else
    526     mhd_assert (! c->in_proc_ready);
    527 }
    528 
    529 
    530 /**
    531  * Accept new connections on the daemon
    532  * @param d the daemon to use
    533  * @return true if all incoming connections has been accepted,
    534  *         false if some connection may still wait to be accepted
    535  */
    536 MHD_FN_PAR_NONNULL_ (1) static bool
    537 daemon_accept_new_conns (struct MHD_Daemon *restrict d)
    538 {
    539   unsigned int num_to_accept;
    540   mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
    541   mhd_assert (! d->net.listen.is_broken);
    542   mhd_assert (! d->conns.block_new);
    543   mhd_assert (d->conns.count < d->conns.cfg.count_limit);
    544   mhd_assert (! mhd_D_HAS_WORKERS (d));
    545 
    546   if (! d->net.listen.non_block)
    547     num_to_accept = 1; /* listen socket is blocking, only one connection can be processed */
    548   else
    549   {
    550     const unsigned int slots_left = d->conns.cfg.count_limit - d->conns.count;
    551     if (! mhd_D_HAS_MASTER (d))
    552     {
    553       /* Fill up to one quarter of allowed limit in one turn */
    554       num_to_accept = d->conns.cfg.count_limit / 4;
    555       /* Limit to a reasonable number */
    556       if (((sizeof(void *) > 4) ? 4096 : 1024) < num_to_accept)
    557         num_to_accept = ((sizeof(void *) > 4) ? 4096 : 1024);
    558       if (slots_left < num_to_accept)
    559         num_to_accept = slots_left;
    560     }
    561 #ifdef MHD_SUPPORT_THREADS
    562     else
    563     {
    564       /* Has workers thread pool. Care must be taken to evenly distribute
    565          new connections in the workers pool.
    566          At the same time, the burst of new connections should be handled as
    567          quick as possible. */
    568       const unsigned int num_conn = d->conns.count;
    569       const unsigned int limit = d->conns.cfg.count_limit;
    570       const unsigned int num_workers =
    571         d->threading.hier.master->threading.hier.pool.num;
    572       if (num_conn < limit / 16)
    573       {
    574         num_to_accept = num_conn / num_workers;
    575         if (8 > num_to_accept)
    576         {
    577           if (8 > slots_left / 16)
    578             num_to_accept = slots_left / 16;
    579           else
    580             num_to_accept = 8;
    581         }
    582         if (64 < num_to_accept)
    583           num_to_accept = 64;
    584       }
    585       else if (num_conn < limit / 8)
    586       {
    587         num_to_accept = num_conn * 2 / num_workers;
    588         if (8 > num_to_accept)
    589         {
    590           if (8 > slots_left / 8)
    591             num_to_accept = slots_left / 8;
    592           else
    593             num_to_accept = 8;
    594         }
    595         if (128 < num_to_accept)
    596           num_to_accept = 128;
    597       }
    598       else if (num_conn < limit / 4)
    599       {
    600         num_to_accept = num_conn * 4 / num_workers;
    601         if (8 > num_to_accept)
    602           num_to_accept = 8;
    603         if (slots_left / 4 < num_to_accept)
    604           num_to_accept = slots_left / 4;
    605         if (256 < num_to_accept)
    606           num_to_accept = 256;
    607       }
    608       else if (num_conn < limit / 2)
    609       {
    610         num_to_accept = num_conn * 8 / num_workers;
    611         if (16 > num_to_accept)
    612           num_to_accept = 16;
    613         if (slots_left / 4 < num_to_accept)
    614           num_to_accept = slots_left / 4;
    615         if (256 < num_to_accept)
    616           num_to_accept = 256;
    617       }
    618       else if (slots_left > limit / 4)
    619       {
    620         num_to_accept = slots_left * 4 / num_workers;
    621         if (slots_left / 8 < num_to_accept)
    622           num_to_accept = slots_left / 8;
    623         if (128 < num_to_accept)
    624           num_to_accept = 128;
    625       }
    626       else if (slots_left > limit / 8)
    627       {
    628         num_to_accept = slots_left * 2 / num_workers;
    629         if (slots_left / 16 < num_to_accept)
    630           num_to_accept = slots_left / 16;
    631         if (64 < num_to_accept)
    632           num_to_accept = 64;
    633       }
    634       else /* (slots_left <= limit / 8) */
    635         num_to_accept = slots_left / 16;
    636 
    637       if (0 == num_to_accept)
    638         num_to_accept = 1;
    639       else if (slots_left > num_to_accept)
    640         num_to_accept = slots_left;
    641     }
    642 #endif /* MHD_SUPPORT_THREADS */
    643   }
    644 
    645   while (0 != --num_to_accept)
    646   {
    647     enum mhd_DaemonAcceptResult res;
    648     res = mhd_daemon_accept_connection (d);
    649     if (mhd_DAEMON_ACCEPT_NO_MORE_PENDING == res)
    650       return true;
    651     if (mhd_DAEMON_ACCEPT_FAILED == res)
    652       return false; /* This is probably "no system resources" error.
    653                        To do try to accept more connections now. */
    654   }
    655   return false; /* More connections may need to be accepted */
    656 }
    657 
    658 
    659 /**
    660  * Check whether particular connection should be excluded from standard HTTP
    661  * communication.
    662  * @param c the connection the check
    663  * @return 'true' if connection should not be used for HTTP communication
    664  *         'false' if connection should be processed as HTTP
    665  */
    666 mhd_static_inline MHD_FN_PAR_NONNULL_ALL_ bool
    667 is_conn_excluded_from_http_comm (struct MHD_Connection *restrict c)
    668 {
    669 #ifdef MHD_SUPPORT_UPGRADE
    670   if (NULL != c->upgr.c)
    671   {
    672     mhd_assert ((mhd_HTTP_STAGE_UPGRADED == c->stage) || \
    673                 (mhd_HTTP_STAGE_UPGRADED_CLEANING == c->stage));
    674     return true;
    675   }
    676 #endif /* MHD_SUPPORT_UPGRADE */
    677 
    678   return c->suspended;
    679 }
    680 
    681 
    682 static bool
    683 daemon_process_all_active_conns (struct MHD_Daemon *restrict d)
    684 {
    685   struct MHD_Connection *c;
    686   mhd_assert (! mhd_D_HAS_WORKERS (d));
    687 
    688   c = mhd_DLINKEDL_GET_FIRST (&(d->events),proc_ready);
    689   while (NULL != c)
    690   {
    691     struct MHD_Connection *next;
    692     /* The current connection can be closed or removed from
    693        "ready" list */
    694     next = mhd_DLINKEDL_GET_NEXT (c, proc_ready);
    695     if (! mhd_conn_process_recv_send_data (c))
    696     {
    697       mhd_conn_pre_clean (c);
    698       mhd_conn_remove_from_daemon (c);
    699       mhd_conn_close_final (c);
    700     }
    701     else
    702     {
    703       mhd_assert (! c->resuming || c->suspended);
    704     }
    705 
    706     c = next;
    707   }
    708   return true;
    709 }
    710 
    711 
    712 #ifdef MHD_SUPPORT_UPGRADE
/**
 * Clean-up all HTTP-Upgraded connections scheduled for clean-up.
 * The clean-up list is drained one entry at a time under the dedicated
 * lock; the actual tear-down of each connection is done unlocked.
 * @param d the daemon to process (must not have a worker pool)
 */
static MHD_FN_PAR_NONNULL_ALL_ void
daemon_cleanup_upgraded_conns (struct MHD_Daemon *d)
{
  /* Unlocked quick check through a volatile view of the daemon;
     presumably forces a fresh read of the list head that other threads
     may update — TODO confirm this is the intent */
  volatile struct MHD_Daemon *voltl_d = d;
  mhd_assert (! mhd_D_HAS_WORKERS (d));

  if (NULL == mhd_DLINKEDL_GET_FIRST (&(voltl_d->conns.upgr), upgr_cleanup))
    return;

  while (true)
  {
    struct MHD_Connection *c;

    /* Pop one entry from the clean-up list under the lock */
    mhd_mutex_lock_chk (&(d->conns.upgr.ucu_lock));
    c = mhd_DLINKEDL_GET_FIRST (&(d->conns.upgr), upgr_cleanup);
    if (NULL != c)
      mhd_DLINKEDL_DEL (&(d->conns.upgr), c, upgr_cleanup);
    mhd_mutex_unlock_chk (&(d->conns.upgr.ucu_lock));

    if (NULL == c)
      break; /* The list has been fully drained */

    /* Tear the connection down outside the lock */
    mhd_assert (mhd_HTTP_STAGE_UPGRADED_CLEANING == c->stage);
    mhd_upgraded_deinit (c);
    mhd_conn_pre_clean (c);
    mhd_conn_remove_from_daemon (c);
    mhd_conn_close_final (c);
  }
}
    746 
    747 
    748 #else  /* ! MHD_SUPPORT_UPGRADE */
    749 #define daemon_cleanup_upgraded_conns(d) ((void) d)
    750 #endif /* ! MHD_SUPPORT_UPGRADE */
    751 
/**
 * Close and clean up all remaining connections of the daemon.
 * Used during daemon destruction. HTTP-Upgraded connections that the
 * application never closed are de-initialised and reported via the log.
 * @param d the daemon to process
 */
MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void
mhd_daemon_close_all_conns (struct MHD_Daemon *d)
{
  struct MHD_Connection *c;
  bool has_upgraded_unclosed;

  has_upgraded_unclosed = false;
  if (! mhd_D_HAS_THR_PER_CONN (d))
  {
    /* Each iteration removes the current head, so always re-read
       the first element of the list */
    for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
         NULL != c;
         c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn))
    {
#ifdef MHD_SUPPORT_UPGRADE
      mhd_assert (mhd_HTTP_STAGE_UPGRADING != c->stage);
      mhd_assert (mhd_HTTP_STAGE_UPGRADED_CLEANING != c->stage);
      if (NULL != c->upgr.c)
      {
        /* HTTP-Upgraded connection left unclosed by the application */
        mhd_assert (c == c->upgr.c);
        has_upgraded_unclosed = true;
        mhd_upgraded_deinit (c);
      }
      else /* Combined with the next 'if' */
#endif
      if (1)
      {
#ifdef MHD_SUPPORT_HTTP2
        if (mhd_C_IS_HTTP2 (c))
          mhd_h2_conn_h2_deinit_start_closing (c);
        else
#endif /* MHD_SUPPORT_HTTP2 */
        mhd_conn_start_closing_d_shutdown (c);
      }
      mhd_conn_pre_clean (c);
      mhd_conn_remove_from_daemon (c);
      mhd_conn_close_final (c);
    }
  }
  else
    mhd_assert (0 && "Not implemented yet");

  if (has_upgraded_unclosed)
    mhd_LOG_MSG (d, MHD_SC_DAEMON_DESTROYED_WITH_UNCLOSED_UPGRADED, \
                 "The daemon is being destroyed, but at least one " \
                 "HTTP-Upgraded connection is unclosed. Any use (including " \
                 "closing) of such connections is undefined behaviour.");
}
    799 
    800 
    801 /**
    802  * Process all external events updated of existing connections, information
    803  * about new connections pending to be accept()'ed, presence of the events on
    804  * the daemon's ITC; resume connections.
    805  * @return 'true' if processed successfully,
    806  *         'false' is unrecoverable error occurs and the daemon must be
    807  *         closed
    808  */
    809 static MHD_FN_PAR_NONNULL_ (1) bool
    810 ext_events_process_net_updates_and_resume_conn (struct MHD_Daemon *restrict d)
    811 {
    812   struct MHD_Connection *restrict c;
    813 
    814   mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    815   mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type);
    816 
    817   d->events.act_req.resume = false; /* Reset flag before processing data */
    818 
    819 #ifdef MHD_SUPPORT_THREADS
    820   if (d->events.data.extr.itc_data.is_active)
    821   {
    822     d->events.data.extr.itc_data.is_active = false;
    823     /* Clear ITC here, before other data processing.
    824      * Any external events will activate ITC again if additional data to
    825      * process is added externally. Clearing ITC early ensures that new data
    826      * (with additional ITC activation) will not be missed. */
    827     mhd_itc_clear (d->threading.itc);
    828   }
    829 #endif /* MHD_SUPPORT_THREADS */
    830 
    831   for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
    832        NULL != c;
    833        c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
    834   {
    835     bool has_err_state;
    836 
    837     if (c->resuming)
    838       start_resuming_connection (c, d);
    839     else
    840     {
    841       if (is_conn_excluded_from_http_comm (c))
    842       {
    843         mhd_assert (! c->in_proc_ready);
    844         continue;
    845       }
    846 
    847       has_err_state = (0 != (((unsigned int) c->sk.ready)
    848                              & mhd_SOCKET_NET_STATE_ERROR_READY));
    849 
    850       mhd_conn_mark_ready_update3 (c,
    851                                    has_err_state,
    852                                    d);
    853     }
    854   }
    855 
    856   return true;
    857 }
    858 
    859 
    860 /**
    861  * Update all registrations of FDs for external monitoring.
    862  * @return #MHD_SC_OK on success,
    863  *         error code otherwise
    864  */
    865 static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
    866 ext_events_update_registrations (struct MHD_Daemon *restrict d)
    867 {
    868   const bool rereg_all = d->events.data.extr.reg_all;
    869   const bool edge_trigg = (mhd_WM_INT_EXTERNAL_EVENTS_EDGE == d->wmode_int);
    870   bool daemon_fds_succeed;
    871   struct MHD_Connection *c;
    872   struct MHD_Connection *c_next;
    873 
    874   mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    875   mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type);
    876 
    877   /* (Re-)register daemon's FDs */
    878 
    879 #ifdef MHD_SUPPORT_THREADS
    880   if (rereg_all ||
    881       (NULL == d->events.data.extr.itc_data.app_cntx))
    882   {
    883     /* (Re-)register ITC FD */
    884     d->events.data.extr.itc_data.app_cntx =
    885       mhd_daemon_extr_event_reg (d,
    886                                  mhd_itc_r_fd (d->threading.itc),
    887                                  MHD_FD_STATE_RECV_EXCEPT,
    888                                  d->events.data.extr.itc_data.app_cntx,
    889                                  (struct MHD_EventUpdateContext *)
    890                                  mhd_SOCKET_REL_MARKER_ITC);
    891   }
    892   daemon_fds_succeed = (NULL != d->events.data.extr.itc_data.app_cntx);
    893 #else  /* ! MHD_SUPPORT_THREADS */
    894   daemon_fds_succeed = true;
    895 #endif /* ! MHD_SUPPORT_THREADS */
    896 
    897   if (daemon_fds_succeed)
    898   {
    899     if ((MHD_INVALID_SOCKET == d->net.listen.fd) &&
    900         (NULL != d->events.data.extr.listen_data.app_cntx))
    901     {
    902       /* De-register the listen FD */
    903       d->events.data.extr.listen_data.app_cntx =
    904         mhd_daemon_extr_event_reg (d,
    905                                    d->net.listen.fd,
    906                                    MHD_FD_STATE_NONE,
    907                                    d->events.data.extr.listen_data.app_cntx,
    908                                    (struct MHD_EventUpdateContext *)
    909                                    mhd_SOCKET_REL_MARKER_LISTEN);
    910       if (NULL != d->events.data.extr.listen_data.app_cntx)
    911         mhd_log_extr_event_dereg_failed (d);
    912     }
    913     else if ((MHD_INVALID_SOCKET != d->net.listen.fd) &&
    914              (rereg_all || (NULL == d->events.data.extr.listen_data.app_cntx)))
    915     {
    916       /* (Re-)register listen FD */
    917       d->events.data.extr.listen_data.app_cntx =
    918         mhd_daemon_extr_event_reg (d,
    919                                    d->net.listen.fd,
    920                                    MHD_FD_STATE_RECV_EXCEPT,
    921                                    d->events.data.extr.listen_data.app_cntx,
    922                                    (struct MHD_EventUpdateContext *)
    923                                    mhd_SOCKET_REL_MARKER_LISTEN);
    924 
    925       daemon_fds_succeed = (NULL != d->events.data.extr.listen_data.app_cntx);
    926     }
    927   }
    928 
    929   if (! daemon_fds_succeed)
    930   {
    931     mhd_LOG_MSG (d, MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE, \
    932                  "Failed to register daemon FDs in the application "
    933                  "(external events) monitoring.");
    934     return MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE;
    935   }
    936 
    937   for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
    938        NULL != c;
    939        c = c_next)
    940   {
    941     enum MHD_FdState watch_for;
    942 
    943     /* Get the next connection now, as the current connection could be removed
    944        from the daemon. */
    945     c_next = mhd_DLINKEDL_GET_NEXT (c,all_conn);
    946 
    947     mhd_assert (! c->resuming || c->suspended);
    948 
    949     if (is_conn_excluded_from_http_comm (c))
    950     {
    951       if (NULL != c->events.extrn.app_cntx)
    952       {
    953         /* De-register the connection socket FD */
    954         c->events.extrn.app_cntx =
    955           mhd_daemon_extr_event_reg (d,
    956                                      c->sk.fd,
    957                                      MHD_FD_STATE_NONE,
    958                                      c->events.extrn.app_cntx,
    959                                      (struct MHD_EventUpdateContext *) c);
    960         if (NULL != c->events.extrn.app_cntx)
    961           mhd_log_extr_event_dereg_failed (d);
    962       }
    963       continue;
    964     }
    965 
    966     watch_for =
    967       edge_trigg ?
    968       MHD_FD_STATE_RECV_SEND_EXCEPT :
    969       (enum MHD_FdState) (MHD_FD_STATE_EXCEPT
    970                           | (((unsigned int) c->event_loop_info)
    971                              & (MHD_EVENT_LOOP_INFO_RECV
    972                                 | MHD_EVENT_LOOP_INFO_SEND)));
    973 
    974     mhd_assert ((! edge_trigg) || \
    975                 (MHD_FD_STATE_RECV_SEND_EXCEPT == c->events.extrn.reg_for) || \
    976                 (NULL == c->events.extrn.app_cntx));
    977 
    978     if ((NULL == c->events.extrn.app_cntx) ||
    979         rereg_all ||
    980         (! edge_trigg && (watch_for != c->events.extrn.reg_for)))
    981     {
    982       /* (Re-)register the connection socket FD */
    983       c->events.extrn.app_cntx =
    984         mhd_daemon_extr_event_reg (d,
    985                                    c->sk.fd,
    986                                    watch_for,
    987                                    c->events.extrn.app_cntx,
    988                                    (struct MHD_EventUpdateContext *) c);
    989       if (NULL == c->events.extrn.app_cntx)
    990       {
    991         mhd_conn_start_closing_ext_event_failed (c);
    992         mhd_conn_pre_clean (c);
    993         mhd_conn_remove_from_daemon (c);
    994         mhd_conn_close_final (c);
    995       }
    996       c->events.extrn.reg_for = watch_for;
    997     }
    998   }
    999 
   1000   return MHD_SC_OK;
   1001 }
   1002 
   1003 
   1004 #ifdef MHD_SUPPORT_SELECT
   1005 
/**
 * Add socket to the fd_set
 * @param fd the socket to add
 * @param fs the pointer to fd_set
 * @param max the pointer to variable to be updated with maximum FD value (or
 *            set to non-zero in case of WinSock)
 * @param d the daemon object (used only for assert-time sanity checks)
 */
mhd_static_inline MHD_FN_PAR_NONNULL_ALL_
MHD_FN_PAR_INOUT_ (2)
MHD_FN_PAR_INOUT_ (3) void
fd_set_wrap (MHD_Socket fd,
             fd_set *restrict fs,
             int *restrict max,
             struct MHD_Daemon *restrict d)
{
  mhd_assert (mhd_FD_FITS_DAEMON (d, fd)); /* Must be checked for every FD before
                                              it is added */
  mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
  (void) d; /* Unused with non-debug builds */
#if defined(MHD_SOCKETS_KIND_POSIX)
  FD_SET (fd, fs);
  /* Track the highest FD value: select() needs 'nfds' == max FD + 1 */
  if (*max < fd)
    *max = fd;
#elif defined(MHD_SOCKETS_KIND_WINSOCK)
  /* Use custom set function to take advantage of known uniqueness of
   * used sockets (to skip useless (for this function) check for duplicated
   * sockets implemented in system's macro). */
  mhd_assert (fs->fd_count < FD_SETSIZE - 1); /* Daemon limits set to always fit FD_SETSIZE */
  mhd_assert (! FD_ISSET (fd, fs)); /* All sockets must be unique */
  fs->fd_array[fs->fd_count++] = fd;
  /* WinSock ignores 'nfds'; '*max' serves only as a "set is non-empty" flag */
  *max = 1;
#else
#error Unknown sockets type
#endif
}
   1042 
   1043 
/**
 * Set daemon's FD_SETs to monitor all daemon's sockets
 * @param d the daemon to use
 * @param listen_only set to 'true' if connections's sockets should NOT
 *                    be monitored
 * @return with POSIX sockets: the maximum number of the socket used in
 *                             the FD_SETs;
 *         with winsock: non-zero if at least one socket has been added to
 *                       the FD_SETs,
 *                       zero if no sockets in the FD_SETs
 */
static MHD_FN_PAR_NONNULL_ (1) int
select_update_fdsets (struct MHD_Daemon *restrict d,
                      bool listen_only)
{
  struct MHD_Connection *c;
  fd_set *const restrict rfds = d->events.data.select.rfds;
  fd_set *const restrict wfds = d->events.data.select.wfds;
  fd_set *const restrict efds = d->events.data.select.efds;
  int ret;

  mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
  mhd_assert (NULL != rfds);
  mhd_assert (NULL != wfds);
  mhd_assert (NULL != efds);
  /* Rebuild all three sets from scratch on every call */
  FD_ZERO (rfds);
  FD_ZERO (wfds);
  FD_ZERO (efds);

  ret = 0;
#ifdef MHD_SUPPORT_THREADS
  /* ITC read side is watched for read and error events */
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  fd_set_wrap (mhd_itc_r_fd (d->threading.itc),
               rfds,
               &ret,
               d);
  fd_set_wrap (mhd_itc_r_fd (d->threading.itc),
               efds,
               &ret,
               d);
  mhd_dbg_print_fd_mon_req ("ITC", \
                            mhd_itc_r_fd (d->threading.itc), \
                            true, \
                            false, \
                            true);
#endif
  /* Watch the listen socket only while new connections are acceptable */
  if ((MHD_INVALID_SOCKET != d->net.listen.fd)
      && ! d->conns.block_new)
  {
    mhd_assert (! d->net.listen.is_broken);

    fd_set_wrap (d->net.listen.fd,
                 rfds,
                 &ret,
                 d);
    fd_set_wrap (d->net.listen.fd,
                 efds,
                 &ret,
                 d);
    mhd_dbg_print_fd_mon_req ("lstn", \
                              d->net.listen.fd, \
                              true, \
                              false, \
                              true);
  }
  if (listen_only)
    return ret;

  /* NOTE(review): connections are iterated from the LAST list entry
     backwards here, unlike the forward iteration used elsewhere --
     presumably deliberate ordering of the WinSock fd_array; confirm. */
  for (c = mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn); NULL != c;
       c = mhd_DLINKEDL_GET_PREV (c,all_conn))
  {
    mhd_assert (mhd_HTTP_STAGE_CLOSED != c->stage);
    if (is_conn_excluded_from_http_comm (c))
      continue;

    /* Request only the events the connection currently needs;
       errors are always monitored. */
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
      fd_set_wrap (c->sk.fd,
                   rfds,
                   &ret,
                   d);
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND))
      fd_set_wrap (c->sk.fd,
                   wfds,
                   &ret,
                   d);
    fd_set_wrap (c->sk.fd,
                 efds,
                 &ret,
                 d);
    mhd_dbg_print_fd_mon_req ("conn", \
                              c->sk.fd, \
                              FD_ISSET (c->sk.fd, rfds), \
                              FD_ISSET (c->sk.fd, wfds), \
                              true);
  }

  return ret;
}
   1142 
   1143 
   1144 static MHD_FN_PAR_NONNULL_ (1) bool
   1145 select_update_statuses_from_fdsets_and_resume_conn (struct MHD_Daemon *d,
   1146                                                     int num_events)
   1147 {
   1148   struct MHD_Connection *c;
   1149   fd_set *const restrict rfds = d->events.data.select.rfds;
   1150   fd_set *const restrict wfds = d->events.data.select.wfds;
   1151   fd_set *const restrict efds = d->events.data.select.efds;
   1152   bool resuming_conn;
   1153 
   1154   mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
   1155   mhd_assert (0 <= num_events);
   1156   mhd_assert (((unsigned int) num_events) <= d->dbg.num_events_elements);
   1157 
   1158   resuming_conn = d->events.act_req.resume;
   1159   if (resuming_conn)
   1160   {
   1161     mhd_assert (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type));
   1162     mhd_assert (! mhd_D_HAS_THR_PER_CONN (d));
   1163     num_events = (int) -1; /* Force process all connections */
   1164     d->events.act_req.resume = false;
   1165   }
   1166 
   1167 #ifndef MHD_FAVOR_SMALL_CODE
   1168   if (0 == num_events)
   1169     return true;
   1170 #endif /* MHD_FAVOR_SMALL_CODE */
   1171 
   1172 #ifdef MHD_SUPPORT_THREADS
   1173   mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
   1174   dbg_print_fd_state_update ("ITC", \
   1175                              mhd_itc_r_fd (d->threading.itc), \
   1176                              FD_ISSET (mhd_itc_r_fd (d->threading.itc), rfds), \
   1177                              FD_ISSET (mhd_itc_r_fd (d->threading.itc), wfds), \
   1178                              FD_ISSET (mhd_itc_r_fd (d->threading.itc), efds));
   1179   if (FD_ISSET (mhd_itc_r_fd (d->threading.itc), efds))
   1180   {
   1181     log_itc_broken (d);
   1182     /* ITC is broken, need to stop the daemon thread now as otherwise
   1183        application will not be able to stop the thread. */
   1184     return false;
   1185   }
   1186   if (FD_ISSET (mhd_itc_r_fd (d->threading.itc), rfds))
   1187   {
   1188     --num_events;
   1189     /* Clear ITC here, before other data processing.
   1190      * Any external events will activate ITC again if additional data to
   1191      * process is added externally. Clearing ITC early ensures that new data
   1192      * (with additional ITC activation) will not be missed. */
   1193     mhd_itc_clear (d->threading.itc);
   1194   }
   1195 
   1196 #ifndef MHD_FAVOR_SMALL_CODE
   1197   if (0 == num_events)
   1198     return true;
   1199 #endif /* MHD_FAVOR_SMALL_CODE */
   1200 #endif /* MHD_SUPPORT_THREADS */
   1201 
   1202   if (MHD_INVALID_SOCKET != d->net.listen.fd)
   1203   {
   1204     mhd_assert (! d->net.listen.is_broken);
   1205     dbg_print_fd_state_update ("lstn", \
   1206                                d->net.listen.fd, \
   1207                                FD_ISSET (d->net.listen.fd, rfds), \
   1208                                FD_ISSET (d->net.listen.fd, wfds), \
   1209                                FD_ISSET (d->net.listen.fd, efds));
   1210     if (FD_ISSET (d->net.listen.fd, efds))
   1211     {
   1212       --num_events;
   1213       log_listen_broken (d);
   1214       /* Close the listening socket unless the master daemon should close it */
   1215       if (! mhd_D_HAS_MASTER (d))
   1216         mhd_socket_close (d->net.listen.fd);
   1217 
   1218       d->events.accept_pending = false;
   1219       d->net.listen.is_broken = true;
   1220       /* Stop monitoring socket to avoid spinning with busy-waiting */
   1221       d->net.listen.fd = MHD_INVALID_SOCKET;
   1222 #ifndef MHD_FAVOR_SMALL_CODE
   1223       if (FD_ISSET (d->net.listen.fd, rfds))
   1224         --num_events;
   1225 #endif /* MHD_FAVOR_SMALL_CODE */
   1226     }
   1227     else
   1228     {
   1229       d->events.accept_pending = FD_ISSET (d->net.listen.fd, rfds);
   1230       if (d->events.accept_pending)
   1231         --num_events;
   1232     }
   1233   }
   1234 
   1235   mhd_assert ((0 == num_events) || \
   1236               (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type)));
   1237 
   1238 #ifdef MHD_FAVOR_SMALL_CODE
   1239   (void) num_events;
   1240   num_events = 1; /* Use static value to minimise the binary size of the next loop */
   1241 #endif /* ! MHD_FAVOR_SMALL_CODE */
   1242 
   1243   for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns), all_conn);
   1244        (NULL != c) && (0 != num_events);
   1245        c = mhd_DLINKEDL_GET_NEXT (c, all_conn))
   1246   {
   1247     if (c->resuming)
   1248       start_resuming_connection (c, d);
   1249     else
   1250     {
   1251       MHD_Socket sk;
   1252       bool recv_ready;
   1253       bool send_ready;
   1254       bool err_state;
   1255 
   1256       if (is_conn_excluded_from_http_comm (c))
   1257         continue;
   1258 
   1259       sk = c->sk.fd;
   1260       recv_ready = FD_ISSET (sk, rfds);
   1261       send_ready = FD_ISSET (sk, wfds);
   1262       err_state = FD_ISSET (sk, efds);
   1263 
   1264       update_conn_net_status (d,
   1265                               c,
   1266                               recv_ready,
   1267                               send_ready,
   1268                               err_state);
   1269 #ifndef MHD_FAVOR_SMALL_CODE
   1270       num_events -=
   1271         (recv_ready ? 1 : 0) + (send_ready ? 1 : 0) + (err_state ? 1 : 0);
   1272 #endif /* MHD_FAVOR_SMALL_CODE */
   1273     }
   1274   }
   1275 
   1276 #ifndef MHD_FAVOR_SMALL_CODE
   1277   mhd_assert ((0 == num_events) || resuming_conn);
   1278 #endif /* MHD_FAVOR_SMALL_CODE */
   1279   return true;
   1280 }
   1281 
   1282 
   1283 /**
   1284  * Get pointer to struct timeval for select() for polling daemon's sockets
   1285  * @param d the daemon to use
   1286  * @param[out] tmvl to pointer to the allocated struct timeval
   1287  * @return the @a tmvl pointer (with maximum wait value set)
   1288  *         or NULL if select may wait indefinitely
   1289  */
   1290 mhd_static_inline MHD_FN_PAR_NONNULL_ALL_
   1291 MHD_FN_PAR_OUT_ (2) struct timeval *
   1292 get_timeval_for_select (const struct MHD_Daemon *restrict d,
   1293                         struct timeval *tmvl)
   1294 {
   1295   const uint_fast64_t max_wait = mhd_daemon_get_wait_max (d);
   1296 #ifdef HAVE_TIME_T
   1297   time_t max_wait_secs = (time_t) (max_wait / 1000u);
   1298 #else  /* ! HAVE_TIME_T */
   1299   long max_wait_secs = (long) (max_wait / 1000u);
   1300 #endif /* ! HAVE_TIME_T */
   1301 #ifdef HAVE_SUSECONDS_T
   1302   suseconds_t max_wait_usecs = (suseconds_t) ((max_wait % 1000u) * 1000u);
   1303 #else  /* ! HAVE_SUSECONDS_T */
   1304   long max_wait_usecs = (long) ((max_wait % 1000u) * 1000u);
   1305 #endif /* ! HAVE_SUSECONDS_T */
   1306 
   1307   if (MHD_WAIT_INDEFINITELY <= max_wait)
   1308     return NULL;
   1309 
   1310   if (0u == max_wait)
   1311   {
   1312     tmvl->tv_sec = 0;
   1313     tmvl->tv_usec = 0;
   1314 
   1315     return tmvl;
   1316   }
   1317 
   1318   if (mhd_COND_ALMOST_NEVER ((max_wait / 1000u !=
   1319                               (uint_fast64_t) max_wait_secs) ||
   1320                              (max_wait_secs <= 0)))
   1321   {
   1322     /* Do not bother figuring out the real maximum 'time_t' value.
   1323        '0x7FFFFFFF' is large enough to be already unrealistic and should
   1324        fit most of signed or unsigned time_t types. */
   1325     tmvl->tv_sec = 0x7FFFFFFF;
   1326     tmvl->tv_usec = 0;
   1327 
   1328     return tmvl;
   1329   }
   1330 
   1331   tmvl->tv_sec = max_wait_secs;
   1332   tmvl->tv_usec = max_wait_usecs;
   1333 
   1334   return tmvl;
   1335 }
   1336 
   1337 
   1338 /**
   1339  * Update states of all connections, check for connection pending
   1340  * to be accept()'ed, check for the events on ITC; resume connections
   1341  * @param listen_only set to 'true' if connections's sockets should NOT
   1342  *                    be monitored
   1343  * @return 'true' if processed successfully,
   1344  *         'false' is unrecoverable error occurs and the daemon must be
   1345  *         closed
   1346  */
   1347 static MHD_FN_PAR_NONNULL_ (1) bool
   1348 get_all_net_updates_by_select_and_resume_conn (struct MHD_Daemon *restrict d,
   1349                                                bool listen_only)
   1350 {
   1351   int max_socket;
   1352   struct timeval tmvl_value;
   1353   struct timeval *tmvl_ptr;
   1354   int num_events;
   1355   mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
   1356 
   1357   max_socket = select_update_fdsets (d,
   1358                                      listen_only);
   1359 
   1360   tmvl_ptr = get_timeval_for_select (d,
   1361                                      &tmvl_value);
   1362 
   1363 #ifdef MHD_SOCKETS_KIND_WINSOCK
   1364   if (0 == max_socket)
   1365   {
   1366     Sleep (tmvl_ptr ? tmvl_ptr->tv_sec : 600);
   1367     return true;
   1368   }
   1369 #endif /* MHD_SOCKETS_KIND_WINSOCK */
   1370 
   1371 #ifdef MHD_USE_TRACE_POLLING_FDS
   1372   if (NULL != tmvl_ptr)
   1373     fprintf (stderr,
   1374              "### (Starting) select(%d, rfds, wfds, efds, [%llu, %llu])...\n",
   1375              max_socket + 1,
   1376              (unsigned long long) tmvl_ptr->tv_sec,
   1377              (unsigned long long) tmvl_ptr->tv_usec);
   1378   else
   1379     fprintf (stderr,
   1380              "### (Starting) select(%d, rfds, wfds, efds, [NULL])...\n",
   1381              max_socket + 1);
   1382 #endif /* MHD_USE_TRACE_POLLING_FDS */
   1383   num_events = select (max_socket + 1,
   1384                        d->events.data.select.rfds,
   1385                        d->events.data.select.wfds,
   1386                        d->events.data.select.efds,
   1387                        tmvl_ptr);
   1388 #ifdef MHD_USE_TRACE_POLLING_FDS
   1389   if (NULL != tmvl_ptr)
   1390     fprintf (stderr,
   1391              "### (Finished) select(%d, rfds, wfds, efds, ->[%llu, %llu]) -> "
   1392              "%d\n",
   1393              max_socket + 1,
   1394              (unsigned long long) tmvl_ptr->tv_sec,
   1395              (unsigned long long) tmvl_ptr->tv_usec,
   1396              num_events);
   1397   else
   1398     fprintf (stderr,
   1399              "### (Finished) select(%d, rfds, wfds, efds, [NULL]) -> "
   1400              "%d\n",
   1401              max_socket + 1,
   1402              num_events);
   1403 #endif /* MHD_USE_TRACE_POLLING_FDS */
   1404 
   1405   if (0 > num_events)
   1406   {
   1407     int err;
   1408     bool is_hard_error;
   1409     bool is_ignored_error;
   1410     is_hard_error = false;
   1411     is_ignored_error = false;
   1412 #if defined(MHD_SOCKETS_KIND_POSIX)
   1413     err = errno;
   1414     if (0 != err)
   1415     {
   1416       is_hard_error =
   1417         ((mhd_EBADF_OR_ZERO == err) || (mhd_EINVAL_OR_ZERO == err));
   1418       is_ignored_error = (mhd_EINTR_OR_ZERO == err);
   1419     }
   1420 #elif defined(MHD_SOCKETS_KIND_WINSOCK)
   1421     err = WSAGetLastError ();
   1422     is_hard_error =
   1423       ((WSAENETDOWN == err) || (WSAEFAULT == err) || (WSAEINVAL == err) ||
   1424        (WSANOTINITIALISED == err));
   1425 #endif
   1426     if (! is_ignored_error)
   1427     {
   1428       if (is_hard_error)
   1429       {
   1430         mhd_LOG_MSG (d, MHD_SC_SELECT_HARD_ERROR, \
   1431                      "The select() encountered unrecoverable error.");
   1432         return false;
   1433       }
   1434       mhd_LOG_MSG (d, MHD_SC_SELECT_SOFT_ERROR, \
   1435                    "The select() encountered error.");
   1436       return true;
   1437     }
   1438   }
   1439 
   1440   return select_update_statuses_from_fdsets_and_resume_conn (d, num_events);
   1441 }
   1442 
   1443 
   1444 #endif /* MHD_SUPPORT_SELECT */
   1445 
   1446 
   1447 #ifdef MHD_SUPPORT_POLL
   1448 
/**
 * Fill / refresh the daemon's pollfd array for the next poll() call.
 *
 * Slots are laid out as: [ITC (if threads supported)] [listen socket (if
 * any)] [one slot per active connection].  The parallel 'rel[]' array maps
 * each slot back to its connection (or to an ITC/listen marker).
 *
 * @param d the daemon to use
 * @param listen_only set to 'true' if connections's sockets should NOT
 *                    be monitored
 * @return the number of filled elements in the pollfd array
 */
static MHD_FN_PAR_NONNULL_ (1) unsigned int
poll_update_fds (struct MHD_Daemon *restrict d,
                 bool listen_only)
{
  unsigned int i_s;
  unsigned int i_c;
  struct MHD_Connection *restrict c;
#ifndef NDEBUG
  unsigned int num_skipped = 0;
#endif /* ! NDEBUG */

  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);

  i_s = 0;
#ifdef MHD_SUPPORT_THREADS
  /* Slot 0: ITC read side, fixed at daemon setup */
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  mhd_assert (d->events.data.poll.fds[i_s].fd == \
              mhd_itc_r_fd (d->threading.itc));
  mhd_assert (mhd_SOCKET_REL_MARKER_ITC == \
              d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
  mhd_assert (POLLIN == d->events.data.poll.fds[i_s].events);
#else  /* HAVE_POLL_CLOBBERS_EVENTS */
  /* On platforms where poll() clobbers '.events' it must be re-set */
  d->events.data.poll.fds[i_s].events = POLLIN;
#endif /* HAVE_POLL_CLOBBERS_EVENTS */
  mhd_dbg_print_fd_mon_req ("ITC", \
                            mhd_itc_r_fd (d->threading.itc), \
                            true, \
                            false, \
                            false);
  ++i_s;
#endif
  if (MHD_INVALID_SOCKET != d->net.listen.fd)
  {
    /* Next slot: listen socket; watched only while new connections
       are allowed */
    mhd_assert (! d->net.listen.is_broken);
    mhd_assert (d->events.data.poll.fds[i_s].fd == d->net.listen.fd);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN == \
                d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
    mhd_assert ((POLLIN == d->events.data.poll.fds[i_s].events) ||
                (0 == d->events.data.poll.fds[i_s].events));
#endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
    d->events.data.poll.fds[i_s].events = d->conns.block_new ? 0 : POLLIN;
    mhd_dbg_print_fd_mon_req ("lstn", \
                              d->net.listen.fd, \
                              POLLIN == d->events.data.poll.fds[i_s].events, \
                              false, \
                              false);
    ++i_s;
  }
  if (listen_only)
    return i_s;

  /* Remaining slots: one per connection participating in HTTP
     communication */
  i_c = i_s;
  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn); NULL != c;
       c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
  {
    unsigned short events; /* 'unsigned' for correct bits manipulations */

    if (is_conn_excluded_from_http_comm (c))
    {
#ifndef NDEBUG
      ++num_skipped;
#endif /* ! NDEBUG */
      continue;
    }

    mhd_assert ((i_c - i_s) < d->conns.cfg.count_limit);
    mhd_assert (i_c < d->dbg.num_events_elements);
    mhd_assert (mhd_HTTP_STAGE_CLOSED != c->stage);

    d->events.data.poll.fds[i_c].fd = c->sk.fd;
    d->events.data.poll.rel[i_c].connection = c;
    /* Request only the events the connection currently needs */
    events = 0;
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
      events |= MHD_POLL_IN;
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND))
      events |= MHD_POLL_OUT;

    d->events.data.poll.fds[i_c].events = (short) events;
    mhd_dbg_print_fd_mon_req ("conn", \
                              c->sk.fd, \
                              MHD_POLL_IN == (MHD_POLL_IN & events), \
                              MHD_POLL_OUT == (MHD_POLL_OUT & events), \
                              false);
    ++i_c;
  }
  /* Every non-excluded connection must have received exactly one slot */
  mhd_assert ((d->conns.count - num_skipped) == (i_c - i_s));
  mhd_assert (i_c <= d->dbg.num_events_elements);
  return i_c;
}
   1540 
   1541 
   1542 static MHD_FN_PAR_NONNULL_ (1) bool
   1543 poll_update_statuses_from_fds (struct MHD_Daemon *restrict d,
   1544                                int num_events)
   1545 {
   1546   unsigned int i_s;
   1547   unsigned int i_c;
   1548   mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);
   1549   mhd_assert (0 <= num_events);
   1550   mhd_assert (((unsigned int) num_events) <= d->dbg.num_events_elements);
   1551 
   1552   if (0 == num_events)
   1553     return true;
   1554 
   1555   i_s = 0;
   1556 #ifdef MHD_SUPPORT_THREADS
   1557   mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
   1558   mhd_assert (d->events.data.poll.fds[i_s].fd == \
   1559               mhd_itc_r_fd (d->threading.itc));
   1560   mhd_assert (mhd_SOCKET_REL_MARKER_ITC == \
   1561               d->events.data.poll.rel[i_s].fd_id);
   1562 #ifndef HAVE_POLL_CLOBBERS_EVENTS
   1563   mhd_assert (POLLIN == d->events.data.poll.fds[i_s].events);
   1564 #endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
   1565   dbg_print_fd_state_update ( \
   1566     "ITC", \
   1567     d->events.data.poll.fds[i_s].fd, \
   1568     0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_IN | POLLIN)), \
   1569     0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_OUT | POLLOUT)), \
   1570     0 != (d->events.data.poll.fds[i_s].revents & (POLLERR | POLLNVAL)));
   1571 
   1572   if (0 != (d->events.data.poll.fds[i_s].revents & (POLLERR | POLLNVAL)))
   1573   {
   1574     log_itc_broken (d);
   1575     /* ITC is broken, need to stop the daemon thread now as otherwise
   1576        application will not be able to stop the thread. */
   1577     return false;
   1578   }
   1579   if (0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_IN | POLLIN)))
   1580   {
   1581     --num_events;
   1582     /* Clear ITC here, before other data processing.
   1583      * Any external events will activate ITC again if additional data to
   1584      * process is added externally. Clearing ITC early ensures that new data
   1585      * (with additional ITC activation) will not be missed. */
   1586     mhd_itc_clear (d->threading.itc);
   1587   }
   1588   ++i_s;
   1589 
   1590   if (0 == num_events)
   1591     return true;
   1592 #endif /* MHD_SUPPORT_THREADS */
   1593 
   1594   if (MHD_INVALID_SOCKET != d->net.listen.fd)
   1595   {
   1596     const short revents = d->events.data.poll.fds[i_s].revents;
   1597 
   1598     mhd_assert (! d->net.listen.is_broken);
   1599     mhd_assert (d->events.data.poll.fds[i_s].fd == d->net.listen.fd);
   1600     mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN == \
   1601                 d->events.data.poll.rel[i_s].fd_id);
   1602 #ifndef HAVE_POLL_CLOBBERS_EVENTS
   1603     mhd_assert ((POLLIN == d->events.data.poll.fds[i_s].events) ||
   1604                 (0 == d->events.data.poll.fds[i_s].events));
   1605 #endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
   1606     dbg_print_fd_state_update ("lstn", \
   1607                                d->events.data.poll.fds[i_s].fd, \
   1608                                0 != (revents & (MHD_POLL_IN | POLLIN)), \
   1609                                0 != (revents & (MHD_POLL_OUT | POLLOUT)), \
   1610                                0 != (revents & (POLLERR | POLLNVAL | POLLHUP)));
   1611     if (0 != (revents & (POLLERR | POLLNVAL | POLLHUP)))
   1612     {
   1613       --num_events;
   1614       log_listen_broken (d);
   1615       /* Close the listening socket unless the master daemon should close it */
   1616       if (! mhd_D_HAS_MASTER (d))
   1617         mhd_socket_close (d->net.listen.fd);
   1618 
   1619       d->events.accept_pending = false;
   1620       d->net.listen.is_broken = true;
   1621       /* Stop monitoring socket to avoid spinning with busy-waiting */
   1622       d->net.listen.fd = MHD_INVALID_SOCKET;
   1623     }
   1624     else
   1625     {
   1626       const bool has_new_conns = (0 != (revents & (MHD_POLL_IN | POLLIN)));
   1627       if (has_new_conns)
   1628       {
   1629         --num_events;
   1630         d->events.accept_pending = true;
   1631       }
   1632       else
   1633       {
   1634         /* Check whether the listen socket was monitored for incoming
   1635            connections */
   1636         if (0 != (d->events.data.poll.fds[i_s].events & POLLIN))
   1637           d->events.accept_pending = false;
   1638       }
   1639     }
   1640     ++i_s;
   1641   }
   1642 
   1643   mhd_assert ((0 == num_events) || \
   1644               (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type)));
   1645 
   1646   for (i_c = i_s; (i_c < i_s + d->conns.count) && (0 < num_events); ++i_c)
   1647   {
   1648     struct MHD_Connection *restrict c;
   1649     bool recv_ready;
   1650     bool send_ready;
   1651     bool err_state;
   1652     short revents;
   1653     mhd_assert (i_c < d->dbg.num_events_elements);
   1654     mhd_assert (mhd_SOCKET_REL_MARKER_EMPTY != \
   1655                 d->events.data.poll.rel[i_c].fd_id);
   1656     mhd_assert (mhd_SOCKET_REL_MARKER_ITC != \
   1657                 d->events.data.poll.rel[i_c].fd_id);
   1658     mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN != \
   1659                 d->events.data.poll.rel[i_c].fd_id);
   1660 
   1661     c = d->events.data.poll.rel[i_c].connection;
   1662     mhd_assert (! is_conn_excluded_from_http_comm (c));
   1663     mhd_assert (c->sk.fd == d->events.data.poll.fds[i_c].fd);
   1664     revents = d->events.data.poll.fds[i_c].revents;
   1665     recv_ready = (0 != (revents & (MHD_POLL_IN | POLLIN)));
   1666     send_ready = (0 != (revents & (MHD_POLL_OUT | POLLOUT)));
   1667 #ifndef MHD_POLLHUP_ON_REM_SHUT_WR
   1668     err_state = (0 != (revents & (POLLHUP | POLLERR | POLLNVAL)));
   1669 #else
   1670     err_state = (0 != (revents & (POLLERR | POLLNVAL)));
   1671     if (0 != (revents & POLLHUP))
   1672     { /* This can be a disconnect OR remote side set SHUT_WR */
   1673       recv_ready = true; /* Check the socket by reading */
   1674       if (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
   1675         err_state = true; /* The socket will not be checked by reading, the only way to avoid spinning */
   1676     }
   1677 #endif
   1678     if (0 != (revents & (MHD_POLLPRI | MHD_POLLRDBAND)))
   1679     { /* Statuses were not requested, but returned */
   1680       if (! recv_ready ||
   1681           (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV)))
   1682         err_state = true; /* The socket will not be read, the only way to avoid spinning */
   1683     }
   1684     if (0 != (revents & MHD_POLLWRBAND))
   1685     { /* Status was not requested, but returned */
   1686       if (! send_ready ||
   1687           (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND)))
   1688         err_state = true; /* The socket will not be written, the only way to avoid spinning */
   1689     }
   1690 
   1691     update_conn_net_status (d, c, recv_ready, send_ready, err_state);
   1692   }
   1693   mhd_assert (d->conns.count >= (i_c - i_s));
   1694   mhd_assert (i_c <= d->dbg.num_events_elements);
   1695   return true;
   1696 }
   1697 
   1698 
/**
 * Refresh the pollfd array, wait for network events with poll() (or
 * WSAPoll() on Winsock) and apply the reported results to the states of
 * the daemon's ITC, listen socket and connections.
 * @param d the daemon to process
 * @param listen_only if 'true' only the listen socket (and the ITC) are
 *                    monitored, connection sockets are skipped
 * @return 'false' on unrecoverable error (the daemon thread must be
 *         stopped), 'true' otherwise (including recoverable poll() errors)
 */
static MHD_FN_PAR_NONNULL_ (1) bool
get_all_net_updates_by_poll (struct MHD_Daemon *restrict d,
                             bool listen_only)
{
#ifdef MHD_USE_TRACE_POLLING_FDS
#  ifdef MHD_SOCKETS_KIND_POSIX
  static const char poll_fn_name[] = "poll";
#  else  /* MHD_SOCKETS_KIND_WINSOCK */
  static const char poll_fn_name[] = "WSAPoll";
#  endif /* MHD_SOCKETS_KIND_WINSOCK */
#endif /* MHD_USE_TRACE_POLLING_FDS */
  unsigned int num_fds;
  int max_wait;
  int num_events;

  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);

  /* (Re-)fill the pollfd array for the current set of monitored sockets */
  num_fds = poll_update_fds (d, listen_only);

  // TODO: handle empty list situation
  max_wait = get_max_wait (d);

#ifdef MHD_USE_TRACE_POLLING_FDS
  fprintf (stderr,
           "### (Starting) %s(fds, %u, %d)...\n",
           poll_fn_name,
           num_fds,
           max_wait);
#endif /* MHD_USE_TRACE_POLLING_FDS */
  num_events = mhd_poll (d->events.data.poll.fds,
                         num_fds,
                         max_wait); // TODO: use correct timeout value
#ifdef MHD_USE_TRACE_POLLING_FDS
  fprintf (stderr,
           "### (Finished) %s(fds, %u, %d) -> %d\n",
           poll_fn_name,
           num_fds,
           max_wait,
           num_events);
#endif /* MHD_USE_TRACE_POLLING_FDS */
  if (0 > num_events)
  {
    int err;
    bool is_hard_error;
    bool is_ignored_error;
    is_hard_error = false;
    is_ignored_error = false;
#if defined(MHD_SOCKETS_KIND_POSIX)
    err = errno;
    if (0 != err)
    {
      /* EFAULT / EINVAL indicate a program error: treat as unrecoverable */
      is_hard_error =
        ((mhd_EFAULT_OR_ZERO == err) || (mhd_EINVAL_OR_ZERO == err));
      /* Interrupted by a signal: harmless, retry on the next round */
      is_ignored_error = (mhd_EINTR_OR_ZERO == err);
    }
#elif defined(MHD_SOCKETS_KIND_WINSOCK)
    err = WSAGetLastError ();
    is_hard_error =
      ((WSAENETDOWN == err) || (WSAEFAULT == err) || (WSAEINVAL == err));
#endif
    if (! is_ignored_error)
    {
      if (is_hard_error)
      {
        mhd_LOG_MSG (d, MHD_SC_POLL_HARD_ERROR, \
                     "The poll() encountered unrecoverable error.");
        return false;
      }
      /* Soft error: log it, but keep the daemon running */
      mhd_LOG_MSG (d, MHD_SC_POLL_SOFT_ERROR, \
                   "The poll() encountered error.");
    }
    return true;
  }

  return poll_update_statuses_from_fds (d, num_events);
}
   1775 
   1776 
   1777 #endif /* MHD_SUPPORT_POLL */
   1778 
   1779 #ifdef MHD_SUPPORT_EPOLL
   1780 
   1781 /**
   1782  * Map events provided by epoll to connection states, ITC and
   1783  * listen socket states
   1784  */
   1785 static MHD_FN_PAR_NONNULL_ (1) bool
   1786 update_statuses_from_eevents (struct MHD_Daemon *restrict d,
   1787                               unsigned int num_events)
   1788 {
   1789   unsigned int i;
   1790   struct epoll_event *const restrict events =
   1791     d->events.data.epoll.events;
   1792   for (i = 0; num_events > i; ++i)
   1793   {
   1794     struct epoll_event *const e = events + i;
   1795 #ifdef MHD_SUPPORT_THREADS
   1796     if (((uint64_t) mhd_SOCKET_REL_MARKER_ITC) == e->data.u64) /* uint64_t is in the system header */
   1797     {
   1798       mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
   1799       dbg_print_fd_state_update ( \
   1800         "ITC", \
   1801         mhd_itc_r_fd (d->threading.itc), \
   1802         0 != (e->events & EPOLLIN), \
   1803         0 != (e->events & EPOLLOUT), \
   1804         0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)));
   1805 
   1806       if (0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)))
   1807       {
   1808         log_itc_broken (d);
   1809         /* ITC is broken, need to stop the daemon thread now as otherwise
   1810            application will not be able to stop the thread. */
   1811         return false;
   1812       }
   1813       if (0 != (e->events & EPOLLIN))
   1814       {
   1815         /* Clear ITC here, before other data processing.
   1816          * Any external events will activate ITC again if additional data to
   1817          * process is added externally. Clearing ITC early ensures that new data
   1818          * (with additional ITC activation) will not be missed. */
   1819         mhd_itc_clear (d->threading.itc);
   1820       }
   1821     }
   1822     else
   1823 #endif /* MHD_SUPPORT_THREADS */
   1824     if (((uint64_t) mhd_SOCKET_REL_MARKER_LISTEN) == e->data.u64) /* uint64_t is in the system header */
   1825     {
   1826       mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
   1827       dbg_print_fd_state_update ( \
   1828         "lstn", \
   1829         d->net.listen.fd, \
   1830         0 != (e->events & EPOLLIN), \
   1831         0 != (e->events & EPOLLOUT), \
   1832         0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)));
   1833       if (0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)))
   1834       {
   1835         log_listen_broken (d);
   1836 
   1837         /* Close the listening socket unless the master daemon should close it */
   1838         if (! mhd_D_HAS_MASTER (d))
   1839           mhd_socket_close (d->net.listen.fd);
   1840         else
   1841         {
   1842           /* Ignore possible error as the socket could be already removed
   1843              from the epoll monitoring by closing the socket */
   1844           (void) epoll_ctl (d->events.data.epoll.e_fd,
   1845                             EPOLL_CTL_DEL,
   1846                             d->net.listen.fd,
   1847                             NULL);
   1848         }
   1849 
   1850         d->events.accept_pending = false;
   1851         d->net.listen.is_broken = true;
   1852         d->net.listen.fd = MHD_INVALID_SOCKET;
   1853       }
   1854       else
   1855         d->events.accept_pending = (0 != (e->events & EPOLLIN));
   1856     }
   1857     else
   1858     {
   1859       bool recv_ready;
   1860       bool send_ready;
   1861       bool err_state;
   1862       struct MHD_Connection *const restrict c =
   1863         (struct MHD_Connection *) e->data.ptr;
   1864       mhd_assert (! is_conn_excluded_from_http_comm (c));
   1865       recv_ready = (0 != (e->events & (EPOLLIN | EPOLLERR | EPOLLHUP)));
   1866       send_ready = (0 != (e->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)));
   1867       err_state = (0 != (e->events & (EPOLLERR | EPOLLHUP)));
   1868 
   1869       update_conn_net_status (d, c, recv_ready, send_ready, err_state);
   1870     }
   1871   }
   1872   return true;
   1873 }
   1874 
   1875 
   1876 /**
   1877  * Update states of all connections, check for connection pending
   1878  * to be accept()'ed, check for the events on ITC.
   1879  */
   1880 static MHD_FN_PAR_NONNULL_ (1) bool
   1881 get_all_net_updates_by_epoll (struct MHD_Daemon *restrict d)
   1882 {
   1883   int max_events;
   1884   int num_events;
   1885   unsigned int events_processed;
   1886   int max_wait;
   1887   mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
   1888   mhd_assert (0 < ((int) d->events.data.epoll.num_elements));
   1889   mhd_assert (0 <= ((int) d->conns.count));
   1890   mhd_assert (d->events.data.epoll.num_elements == \
   1891               (size_t) ((int) d->events.data.epoll.num_elements));
   1892   mhd_assert (0 != d->events.data.epoll.num_elements);
   1893   mhd_assert (0 != d->conns.cfg.count_limit);
   1894   mhd_assert (d->events.data.epoll.num_elements == d->dbg.num_events_elements);
   1895 
   1896   // TODO: add listen socket enable/disable
   1897 
   1898   /* Minimise amount of data passed from userspace to kernel and back */
   1899   max_events = (int) d->conns.count;
   1900 #ifdef MHD_SUPPORT_THREADS
   1901   ++max_events;
   1902 #endif /* MHD_SUPPORT_THREADS */
   1903   if (MHD_INVALID_SOCKET != d->net.listen.fd)
   1904     ++max_events;
   1905   /* Make sure that one extra slot used to clearly detect that all events
   1906    * were gotten. */
   1907   ++max_events;
   1908   if ((0 > max_events) ||
   1909       (max_events > (int) d->events.data.epoll.num_elements))
   1910     max_events = (int) d->events.data.epoll.num_elements;
   1911 
   1912   events_processed = 0;
   1913   max_wait = get_max_wait (d);
   1914   do
   1915   {
   1916 #ifdef MHD_USE_TRACE_POLLING_FDS
   1917     fprintf (stderr,
   1918              "### (Starting) epoll_wait(%d, events, %d, %d)...\n",
   1919              d->events.data.epoll.e_fd,
   1920              (int) d->events.data.epoll.num_elements,
   1921              max_wait);
   1922 #endif /* MHD_USE_TRACE_POLLING_FDS */
   1923     num_events = epoll_wait (d->events.data.epoll.e_fd,
   1924                              d->events.data.epoll.events,
   1925                              max_events,
   1926                              max_wait);
   1927 #ifdef MHD_USE_TRACE_POLLING_FDS
   1928     fprintf (stderr,
   1929              "### (Finished) epoll_wait(%d, events, %d, %d) -> %d\n",
   1930              d->events.data.epoll.e_fd,
   1931              max_events,
   1932              max_wait,
   1933              num_events);
   1934 #endif /* MHD_USE_TRACE_POLLING_FDS */
   1935     max_wait = 0;
   1936     if (0 > num_events)
   1937     {
   1938       const int err = errno;
   1939       if (EINTR != err)
   1940       {
   1941         mhd_LOG_MSG (d, MHD_SC_EPOLL_HARD_ERROR, \
   1942                      "The epoll_wait() encountered unrecoverable error.");
   1943         return false;
   1944       }
   1945       return true; /* EINTR, try next time */
   1946     }
   1947     if (! update_statuses_from_eevents (d, (unsigned int) num_events))
   1948       return false;
   1949     if (max_events > num_events)
   1950       return true; /* All events have been read */
   1951 
   1952     /* Use all buffer for the next getting events round(s) */
   1953     max_events = (int) d->events.data.epoll.num_elements;
   1954     mhd_assert (0 < max_events);
   1955     mhd_assert (d->events.data.epoll.num_elements == (size_t) max_events);
   1956     max_wait = 0; /* Do not block on the next getting events rounds */
   1957 
   1958     events_processed += (unsigned int) num_events; /* Avoid reading too many events */
   1959   } while ((events_processed < d->conns.cfg.count_limit)
   1960            || (events_processed < d->conns.cfg.count_limit + 2));
   1961 
   1962   return true;
   1963 }
   1964 
   1965 
   1966 #endif /* MHD_SUPPORT_EPOLL */
   1967 
   1968 #ifdef MHD_SUPPORT_KQUEUE
   1969 
   1970 static MHD_FN_PAR_NONNULL_ALL_
   1971 MHD_FN_PAR_IN_ (2) void
   1972 kqueue_handle_missed_change (struct MHD_Daemon *restrict d,
   1973                              const struct kevent *restrict upd_event)
   1974 {
   1975   mhd_assert (mhd_D_IS_USING_KQUEUE (d));
   1976   mhd_ASSUME (mhd_SOCKET_REL_PTRMARKER_EMPTY != mhd_KE_GET_UDATA (upd_event));
   1977   mhd_ASSUME (mhd_SOCKET_REL_PTRMARKER_ITC != mhd_KE_GET_UDATA (upd_event));
   1978 
   1979   if (mhd_SOCKET_REL_PTRMARKER_LISTEN == mhd_KE_GET_UDATA (upd_event))
   1980   {
   1981     return;
   1982   }
   1983   else
   1984   {
   1985     struct MHD_Connection *const restrict c =
   1986       (struct MHD_Connection *) mhd_KE_GET_UDATA (upd_event);
   1987 
   1988     mhd_ASSUME (d == c->daemon);
   1989 
   1990     mhd_conn_start_closing_no_sys_res (c);
   1991     mhd_conn_pre_clean (c);
   1992     mhd_conn_remove_from_daemon (c);
   1993     mhd_conn_close_final (c);
   1994   }
   1995 }
   1996 
   1997 
   1998 static MHD_FN_PAR_NONNULL_ALL_
   1999 MHD_FN_PAR_IN_SIZE_ (2,3) void
   2000 kqueue_handle_missed_changes (struct MHD_Daemon *restrict d,
   2001                               struct kevent *restrict kes,
   2002                               int num_elements)
   2003 {
   2004   int i;
   2005 
   2006   mhd_ASSUME (0 < num_elements);
   2007 
   2008   for (i = 0; i < num_elements; ++i)
   2009     kqueue_handle_missed_change (d,
   2010                                  kes + i);
   2011 }
   2012 
   2013 
/**
 * Refresh kqueue monitoring of the daemon's sockets: update the filter
 * state of the listen socket and register read/write filters for every
 * connection that is not yet monitored.
 * The change entries are collected in the 'kes' buffer; whenever the
 * buffer cannot hold two more filters, the collected changes are flushed
 * to the kernel with a zero timeout and collection restarts.
 * @param d the daemon to use
 * @return the number of change entries left in the 'kes' buffer
 *         (to be submitted with the next kevent() call), non-negative
 */
static MHD_FN_PAR_NONNULL_ALL_ MHD_FN_MUST_CHECK_RESULT_ int
update_kqueue_monitoring (struct MHD_Daemon *restrict d)
{
  struct MHD_Connection *c;
  struct kevent *restrict kes = d->events.data.kq.kes;
  int num_updates;
  const int max_changes = (int) d->events.data.kq.num_elements;

  mhd_assert (mhd_D_IS_USING_KQUEUE (d));
  mhd_assert (NULL != kes);
  mhd_assert (2 <= max_changes); /* room for at least one read+write filter pair */

  num_updates = 0;

  if (MHD_INVALID_SOCKET != d->net.listen.fd)
  {
    mhd_assert (! d->net.listen.is_broken);

    /* Enable or disable accepting of new connections depending on
       whether new connections are currently blocked */
    mhd_KE_SET (kes + num_updates,
                d->net.listen.fd,
                EVFILT_READ,
                (d->conns.block_new ? EV_DISABLE : EV_ENABLE)
                | mhd_EV_KEEPUDATA_OR_ZERO,
                mhd_SOCKET_REL_PTRMARKER_LISTEN);

    mhd_dbg_print_kevent_change ("lstn",
                                 kes + num_updates);
    ++num_updates;
  }

  /* Process unmonitored connections starting from the earliest added
     unmonitored connection */

  c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);

  if ((NULL == c) || (c->events.kq.monitored))
    return num_updates; /* No unmonitored connections */

  /* Walk forward to the last consecutive unmonitored connection
     (presumably the earliest added one, assuming new connections are
     linked at the head of the list -- TODO confirm list ordering) */
  while (1)
  {
    struct MHD_Connection *const next_c = mhd_DLINKEDL_GET_NEXT (c,all_conn);
    if (NULL == next_c)
      break; /* Found the end of the list */
    if (next_c->events.kq.monitored)
      break; /* Found the earliest added unmonitored connection */
    c = next_c;
  }

  mhd_ASSUME (NULL != c);
  mhd_ASSUME (! c->events.kq.monitored);

  /* Register filters walking back towards the head of the list */
  for ((void) c; NULL != c; c = mhd_DLINKEDL_GET_PREV (c,all_conn))
  {
    mhd_ASSUME (! c->events.kq.monitored);

    mhd_assert (! is_conn_excluded_from_http_comm (c));
    mhd_assert (mhd_HTTP_STAGE_CLOSED != c->stage);

    /* Check for the space for two filters */
    if ((max_changes - 1) <= num_updates)
    {
      /* Too many updates for a single kevent() call */
      static const struct timespec zero_timeout = {0, 0};
      int res;

#ifdef MHD_USE_TRACE_POLLING_FDS
      fprintf (stderr,
               "### (Starting) kevent(%d, changes, %d, [NULL], "
               "0, [0, 0])...\n",
               d->events.data.kq.kq_fd,
               num_updates);
#endif /* MHD_USE_TRACE_POLLING_FDS */
      /* Flush the collected changes without waiting for any events */
      res = mhd_kevent (d->events.data.kq.kq_fd,
                        kes,
                        num_updates,
                        NULL,
                        0,
                        &zero_timeout);
#ifdef MHD_USE_TRACE_POLLING_FDS
      fprintf (stderr,
               "### (Finished) kevent(%d, changes, %d, [NULL], "
               "0, [0, 0]) -> %d\n",
               d->events.data.kq.kq_fd,
               num_updates,
               res);
#endif /* MHD_USE_TRACE_POLLING_FDS */
      if (0 > res)
      {
        /* On failure (other than EINTR) treat all submitted changes as
           missed: the affected connections are closed and removed */
        if (EINTR != errno)
          kqueue_handle_missed_changes (d,
                                        kes,
                                        num_updates);

      }
      num_updates = 0;
    }

    /* Add recv() filter */
    mhd_KE_SET (kes + num_updates,
                c->sk.fd,
                EVFILT_READ,
                EV_ADD | EV_CLEAR, /* 'EV_CLEAR' means edge trigger */
                c);
    mhd_dbg_print_kevent_change ("conn",
                                 kes + num_updates);

    ++num_updates;

    /* Add send() filter */
    mhd_KE_SET (kes + num_updates,
                c->sk.fd,
                EVFILT_WRITE,
                EV_ADD | EV_CLEAR, /* 'EV_CLEAR' means edge trigger */
                c);
    mhd_dbg_print_kevent_change ("conn",
                                 kes + num_updates);

    ++num_updates;

    c->events.kq.monitored = true;

    mhd_assert (0 < num_updates);
  }

  mhd_assert (0 <= num_updates);
  mhd_assert (num_updates <= (int) d->events.data.kq.num_elements);

  return num_updates;
}
   2143 
   2144 
   2145 /**
   2146  * Map events provided by kqueue to connection states, ITC and
   2147  * listen socket states
   2148  */
   2149 static MHD_FN_PAR_NONNULL_ (1) bool
   2150 update_statuses_from_kevents (struct MHD_Daemon *restrict d,
   2151                               unsigned int num_events)
   2152 {
   2153   unsigned int i;
   2154   struct kevent *restrict kes = d->events.data.kq.kes;
   2155 
   2156   mhd_assert (mhd_D_IS_USING_KQUEUE (d));
   2157 
   2158   for (i = 0u; num_events > i; ++i)
   2159   {
   2160     struct kevent *const e = kes + i;
   2161     bool eof_ready;
   2162 #ifdef MHD_SUPPORT_THREADS
   2163     if (mhd_SOCKET_REL_PTRMARKER_ITC == mhd_KE_GET_UDATA (e))
   2164     {
   2165       mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
   2166       mhd_assert (mhd_itc_r_fd (d->threading.itc) == (int) e->ident);
   2167       mhd_assert (EVFILT_READ == e->filter);
   2168       mhd_assert (0 == (e->flags & EV_ERROR));
   2169 
   2170       eof_ready = (0 != (e->flags & EV_EOF));
   2171 
   2172       mhd_dbg_print_kevent_report ("ITC",
   2173                                    e);
   2174 
   2175       if (eof_ready)
   2176       {
   2177         log_itc_broken (d);
   2178         /* ITC is broken, need to stop the daemon thread now as otherwise
   2179            application will not be able to stop the thread. */
   2180         return false;
   2181       }
   2182       /* Clear ITC here, before other data processing.
   2183          Any external events will activate ITC again if additional data to
   2184          process is added externally. Clearing ITC early ensures that new data
   2185          (which followed by ITC activation) will not be missed. */
   2186       mhd_itc_clear (d->threading.itc);
   2187     }
   2188     else
   2189 #endif /* MHD_SUPPORT_THREADS */
   2190     if (mhd_SOCKET_REL_PTRMARKER_LISTEN == mhd_KE_GET_UDATA (e))
   2191     {
   2192       bool listen_broken;
   2193       mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
   2194       mhd_assert (d->net.listen.fd == (int) e->ident);
   2195       mhd_assert (EVFILT_READ == e->filter);
   2196 
   2197       eof_ready = (0 != (e->flags & EV_EOF));
   2198 
   2199       mhd_dbg_print_kevent_report ("lstn",
   2200                                    e);
   2201 
   2202       listen_broken = false;
   2203       if (eof_ready)
   2204         listen_broken = true;
   2205       else if ((0 != (e->flags & EV_ERROR)))
   2206         listen_broken = true;
   2207 
   2208       if (listen_broken)
   2209       {
   2210         log_listen_broken (d);
   2211 
   2212         /* Close the listening socket unless the master daemon should close it */
   2213         if (! mhd_D_HAS_MASTER (d))
   2214           mhd_socket_close (d->net.listen.fd);
   2215         else
   2216         {
   2217           static const struct timespec zero_timeout = {0, 0};
   2218           struct kevent remove_listen;
   2219           int res;
   2220 
   2221           mhd_KE_SET (&remove_listen,
   2222                       d->net.listen.fd,
   2223                       EVFILT_READ,
   2224                       EV_DELETE,
   2225                       mhd_SOCKET_REL_PTRMARKER_LISTEN);
   2226 
   2227 #ifdef MHD_USE_TRACE_POLLING_FDS
   2228           fprintf (stderr,
   2229                    "### (Starting) kevent(%d, changes, 1, [NULL], "
   2230                    "0, [0, 0])...\n",
   2231                    d->events.data.kq.kq_fd);
   2232 #endif /* MHD_USE_TRACE_POLLING_FDS */
   2233           res = mhd_kevent (d->events.data.kq.kq_fd,
   2234                             &remove_listen,
   2235                             1,
   2236                             NULL,
   2237                             0,
   2238                             &zero_timeout);
   2239 #ifdef MHD_USE_TRACE_POLLING_FDS
   2240           fprintf (stderr,
   2241                    "### (Finished) kevent(%d, changes, 1, [NULL], "
   2242                    "0, [0, 0]) -> %d\n",
   2243                    d->events.data.kq.kq_fd,
   2244                    res);
   2245 #endif /* MHD_USE_TRACE_POLLING_FDS */
   2246           /* Ignore possible error as the socket could be already removed
   2247              from the kqueue monitoring by closing the socket */
   2248           (void) res;
   2249         }
   2250 
   2251         d->events.accept_pending = false;
   2252         d->net.listen.is_broken = true;
   2253         d->net.listen.fd = MHD_INVALID_SOCKET;
   2254       }
   2255       else
   2256         d->events.accept_pending = true;
   2257     }
   2258     else
   2259     {
   2260       bool err_ready;
   2261       bool recv_ready;
   2262       bool send_ready;
   2263       struct MHD_Connection *const restrict c =
   2264         (struct MHD_Connection *) mhd_KE_GET_UDATA (e);
   2265 
   2266       mhd_ASSUME (d == c->daemon);
   2267       mhd_assert (c->events.kq.monitored);
   2268       mhd_ASSUME (mhd_SOCKET_REL_PTRMARKER_EMPTY != mhd_KE_GET_UDATA (e));
   2269 
   2270       mhd_dbg_print_kevent_report ("conn",
   2271                                    e);
   2272 
   2273       if ((0 != (e->flags & EV_ERROR)))
   2274       {
   2275         /* Error adding connection to monitoring */
   2276         kqueue_handle_missed_change (d,
   2277                                      e);
   2278 
   2279         continue;
   2280       }
   2281 
   2282       eof_ready = (0 != (e->flags & EV_EOF));
   2283       err_ready = (eof_ready && (0 != e->fflags));
   2284 
   2285       if (err_ready)
   2286       {
   2287         c->sk.state.discnt_err =
   2288           mhd_socket_error_get_from_sys_err ((int) e->fflags);
   2289         mhd_assert (mhd_SOCKET_ERR_IS_HARD (c->sk.state.discnt_err));
   2290       }
   2291       /* This is a tricky processing as each "filter" updates only its own
   2292          side of the monitoring, not giving a picture of a complete socket
   2293          readiness. */
   2294 
   2295       if (EVFILT_READ == e->filter)
   2296       {
   2297         recv_ready = true;
   2298         send_ready = mhd_SCKT_NET_ST_HAS_FLAG_SEND (c->sk.ready);
   2299       }
   2300       else
   2301       {
   2302         mhd_assert (EVFILT_WRITE == e->filter);
   2303         recv_ready = mhd_SCKT_NET_ST_HAS_FLAG_RECV (c->sk.ready);
   2304         send_ready = true;
   2305       }
   2306 
   2307       update_conn_net_status (d,
   2308                               c,
   2309                               recv_ready,
   2310                               send_ready,
   2311                               err_ready
   2312                               || mhd_SCKT_NET_ST_HAS_FLAG_ERROR (c->sk.ready));
   2313     }
   2314   }
   2315   return true;
   2316 }
   2317 
   2318 
   2319 /**
   2320  * Update states of all connections, check for connection pending
   2321  * to be accept()'ed, check for the events on ITC.
   2322  */
   2323 static MHD_FN_PAR_NONNULL_ (1) bool
   2324 get_all_net_updates_by_kqueue (struct MHD_Daemon *restrict d)
   2325 {
   2326   int max_events;
   2327   int num_events;
   2328   int num_updates;
   2329   size_t events_processed;
   2330   uint_fast64_t max_wait;
   2331   struct timespec ke_timeout;
   2332 
   2333   mhd_assert (mhd_D_IS_USING_KQUEUE (d));
   2334   mhd_assert (0 < d->events.data.kq.kq_fd);
   2335   mhd_assert (0 < (int) (d->events.data.kq.num_elements));
   2336   mhd_assert (0 != d->events.data.kq.num_elements);
   2337   mhd_assert (0 != d->conns.cfg.count_limit);
   2338   mhd_assert (d->events.data.kq.num_elements == d->dbg.num_events_elements);
   2339 
   2340   num_updates = update_kqueue_monitoring (d);
   2341   mhd_ASSUME (0 <= num_updates);
   2342 
   2343   /* Minimise amount of data passed from userspace to kernel and back */
   2344   max_events = (int) (d->conns.count * 2);
   2345 #ifdef MHD_SUPPORT_THREADS
   2346   ++max_events;
   2347 #endif /* MHD_SUPPORT_THREADS */
   2348   if (MHD_INVALID_SOCKET != d->net.listen.fd)
   2349     ++max_events;
   2350   /* Make sure that one extra slot used to clearly detect that all events
   2351      were gotten (if all provided slots are used then extra event could be
   2352      pending still). */
   2353   ++max_events;
   2354   if ((0 >= max_events) ||
   2355       (max_events > (int) d->events.data.kq.num_elements))
   2356     max_events = (int) d->events.data.kq.num_elements;
   2357 
   2358   max_wait = mhd_daemon_get_wait_max (d);
   2359   ke_timeout.tv_sec = (time_t) (max_wait / 1000);
   2360   ke_timeout.tv_nsec = (long) ((max_wait % 1000) * 1000000L);
   2361   events_processed = 0;
   2362   do
   2363   {
   2364 #ifdef MHD_USE_TRACE_POLLING_FDS
   2365     if (max_wait == MHD_WAIT_INDEFINITELY)
   2366       fprintf (stderr,
   2367                "### (Starting) kevent(%d, changes, %d, events, "
   2368                "%d, [NULL])...\n",
   2369                d->events.data.kq.kq_fd,
   2370                num_updates,
   2371                max_events);
   2372     else
   2373       fprintf (stderr,
   2374                "### (Starting) kevent(%d, changes, %d, events, "
   2375                "%d, [%llu, %llu])...\n",
   2376                d->events.data.kq.kq_fd,
   2377                num_updates,
   2378                max_events,
   2379                (unsigned long long) ke_timeout.tv_sec,
   2380                (unsigned long long) ke_timeout.tv_nsec);
   2381 #endif /* MHD_USE_TRACE_POLLING_FDS */
   2382     num_events =
   2383       kevent (d->events.data.kq.kq_fd,
   2384               d->events.data.kq.kes,
   2385               num_updates,
   2386               d->events.data.kq.kes,
   2387               max_events,
   2388               (max_wait == MHD_WAIT_INDEFINITELY) ? NULL : &ke_timeout);
   2389 #ifdef MHD_USE_TRACE_POLLING_FDS
   2390     if (max_wait == MHD_WAIT_INDEFINITELY)
   2391       fprintf (stderr,
   2392                "### (Finished) kevent(%d, changes, %d, events, "
   2393                "%d, [NULL]) -> %d\n",
   2394                d->events.data.kq.kq_fd,
   2395                num_updates,
   2396                max_events,
   2397                num_events);
   2398     else
   2399       fprintf (stderr,
   2400                "### (Finished) kevent(%d, changes, %d, events, "
   2401                "%d, [%llu, %llu]) -> %d\n",
   2402                d->events.data.kq.kq_fd,
   2403                num_updates,
   2404                max_events,
   2405                (unsigned long long) ke_timeout.tv_sec,
   2406                (unsigned long long) ke_timeout.tv_nsec,
   2407                num_events);
   2408 #endif /* MHD_USE_TRACE_POLLING_FDS */
   2409 
   2410     if (0 > num_events)
   2411     {
   2412       const int err = errno;
   2413       if (EINTR == err)
   2414         return true; /* EINTR, try next time */
   2415 
   2416       mhd_LOG_MSG (d, MHD_SC_KQUEUE_HARD_ERROR, \
   2417                    "The kevent() encountered unrecoverable error.");
   2418       return false;
   2419     }
   2420     if (! update_statuses_from_kevents (d,
   2421                                         (unsigned int) num_events))
   2422       return false;
   2423     if (max_events > num_events)
   2424       return true; /* All events have been read */
   2425 
   2426     /* Use all slots for the next round(s) of getting events  */
   2427     max_events = (int) d->events.data.kq.num_elements;
   2428     max_wait = 0; /* Do not block on the next getting events rounds */
   2429     ke_timeout.tv_sec = 0;
   2430     ke_timeout.tv_nsec = 0;
   2431 
   2432     mhd_assert (0 < max_events);
   2433 
   2434     /* If too many events are coming - process events that have been read already */
   2435     events_processed += (size_t) num_events;
   2436   } while ((events_processed < (d->conns.cfg.count_limit * 2))
   2437            || (events_processed < (d->conns.cfg.count_limit * 2) + 2));
   2438 
   2439   return true;
   2440 }
   2441 
   2442 
   2443 #endif /* MHD_SUPPORT_KQUEUE */
   2444 
   2445 
   2446 /**
   2447  * Close timed-out connections (if any)
   2448  * @param d the daemon to use
   2449  */
   2450 static MHD_FN_PAR_NONNULL_ALL_ void
   2451 daemon_close_timedout_conns (struct MHD_Daemon *restrict d)
   2452 {
   2453   struct MHD_Connection *c;
   2454   struct MHD_Connection *prev_c;
   2455 
   2456 #if defined(MHD_SUPPORT_THREADS)
   2457   mhd_assert (! mhd_D_HAS_WORKERS (d));
   2458   mhd_assert (! mhd_D_HAS_THR_PER_CONN (d));
   2459 #endif /* MHD_SUPPORT_THREADS */
   2460 
   2461   /* Check "normal" timeouts list */
   2462   c = mhd_DLINKEDL_GET_LAST_D (&(d->conns.def_timeout));
   2463 
   2464   while (NULL != c)
   2465   {
   2466     mhd_assert (! c->timeout.in_cstm_tmout_list);
   2467     mhd_assert (0u != d->conns.cfg.timeout_milsec);
   2468 
   2469     if (mhd_conn_is_timeout_expired (c))
   2470     {
   2471       prev_c = mhd_DLINKEDL_GET_PREV (&(c->timeout),
   2472                                       tmout_list);
   2473       mhd_conn_start_closing_timedout (c);
   2474       mhd_conn_pre_clean (c);
   2475       mhd_conn_remove_from_daemon (c);
   2476       mhd_conn_close_final (c);
   2477 
   2478       c = prev_c;
   2479     }
   2480     else
   2481       break; /* DL-list is sorted, no need to check the rest of the list */
   2482   }
   2483 
   2484   /* Check "custom" timeouts list */
   2485   c = mhd_DLINKEDL_GET_LAST_D (&(d->conns.cust_timeout));
   2486 
   2487   while (NULL != c)
   2488   {
   2489     mhd_assert (c->timeout.in_cstm_tmout_list);
   2490 
   2491     prev_c = mhd_DLINKEDL_GET_PREV (&(c->timeout),
   2492                                     tmout_list);
   2493 
   2494     if (mhd_conn_is_timeout_expired (c))
   2495     {
   2496       mhd_conn_start_closing_timedout (c);
   2497       mhd_conn_pre_clean (c);
   2498       mhd_conn_remove_from_daemon (c);
   2499       mhd_conn_close_final (c);
   2500     }
   2501 
   2502     /* "Custom" timeouts list is not sorted, check all members */
   2503     c = prev_c;
   2504   }
   2505 }
   2506 
   2507 
/**
 * Prepare daemon's data for the new round of connections processing
 * @param d the daemon to use
 */
static MHD_FN_PAR_NONNULL_ALL_ void
daemon_reset_per_round_data (struct MHD_Daemon *restrict d)
{
  /* Mark the per-round timestamp as "not set" (NOTE(review): presumably
     it is re-acquired lazily when first needed during the new round —
     confirm against the users of 'events.time') */
  d->events.time.is_set = false;
}
   2517 
   2518 
/**
 * Perform one round of daemon connection and data processing.
 *
 * This function does the following:
 * + poll all connections and daemon FDs (if internal polling is used);
 * + resume connections pending to be resumed;
 * + update connection statuses based on socket states (recv/send ready or
 *   disconnect detection);
 * + receive, send and/or parse connections data as needed, including call of
 *   callbacks for processing requests and response generation;
 * + close broken connections;
 * + accept new connection (if needed);
 * + cleanup closed "upgraded" connections.
 * @param d the daemon to use
 * @return 'true' on success,
 *         'false' if daemon is broken
 */
static MHD_FN_PAR_NONNULL_ (1) bool
process_all_events_and_data (struct MHD_Daemon *restrict d)
{
  daemon_reset_per_round_data (d);

  /* Stage 1: collect the network events using the configured polling
     backend and resume connections pending to be resumed */
  switch (d->events.poll_type)
  {
  case mhd_POLL_TYPE_EXT:
    mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    if (! ext_events_process_net_updates_and_resume_conn (d))
      return false;
    break;
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    if (! get_all_net_updates_by_select_and_resume_conn (d, false))
      return false;
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    if (! get_all_net_updates_by_poll (d, false))
      return false;
    daemon_resume_conns_if_needed (d);
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    if (! get_all_net_updates_by_epoll (d))
      return false;
    daemon_resume_conns_if_needed (d);
    break;
#endif /* MHD_SUPPORT_EPOLL */
#ifdef MHD_SUPPORT_KQUEUE
  case mhd_POLL_TYPE_KQUEUE:
    if (! get_all_net_updates_by_kqueue (d))
      return false;
    daemon_resume_conns_if_needed (d);
    break;
#endif /* MHD_SUPPORT_KQUEUE */
  /* The compiled-out backends are listed here (falling through to the
     panic) so the 'switch' stays exhaustive for every build
     configuration */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    MHD_PANIC ("Daemon data integrity broken");
    break;
  }

  /* Stage 2: process connections added externally (if any) */
  mhd_daemon_process_ext_added_conns (d);

  /* Stage 3: accept new connections when the listening socket is ready
     and accepting is not blocked; keep 'accept_pending' raised if not
     all pending connections could be accepted */
  if (d->events.accept_pending && ! d->conns.block_new)
    d->events.accept_pending = ! daemon_accept_new_conns (d);

  /* Stage 4: per-connection data processing and cleanups */
  daemon_process_all_active_conns (d);
  daemon_close_timedout_conns (d);
  daemon_cleanup_upgraded_conns (d);
  return ! mhd_D_HAS_STOP_REQ (d);
}
   2598 
   2599 
   2600 static
   2601 MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
   2602 process_reg_events_int (struct MHD_Daemon *MHD_RESTRICT daemon,
   2603                         uint_fast64_t *MHD_RESTRICT next_max_wait)
   2604 {
   2605   enum MHD_StatusCode res;
   2606 
   2607   if (mhd_DAEMON_STATE_STARTED > daemon->state)
   2608     return MHD_SC_TOO_EARLY;
   2609   if (! mhd_WM_INT_HAS_EXT_EVENTS (daemon->wmode_int))
   2610     return MHD_SC_EXTERNAL_EVENT_ONLY;
   2611   if (mhd_DAEMON_STATE_STARTED < daemon->state)
   2612     return MHD_SC_TOO_LATE;
   2613 
   2614 #ifdef MHD_SUPPORT_THREADS
   2615   if (daemon->events.data.extr.itc_data.is_broken)
   2616     return MHD_SC_DAEMON_SYS_DATA_BROKEN;
   2617 #endif /* MHD_SUPPORT_THREADS */
   2618 
   2619   if (daemon->net.listen.is_broken)
   2620     return MHD_SC_DAEMON_SYS_DATA_BROKEN;
   2621 
   2622   /* Ignore returned value */
   2623   (void) process_all_events_and_data (daemon);
   2624 
   2625   if (NULL != next_max_wait)
   2626     *next_max_wait = MHD_WAIT_INDEFINITELY;
   2627 
   2628   res = ext_events_update_registrations (daemon);
   2629   if (MHD_SC_OK != res)
   2630     return res;
   2631 
   2632 #ifdef MHD_SUPPORT_THREADS
   2633   if (daemon->events.data.extr.itc_data.is_broken)
   2634   {
   2635     log_itc_broken (daemon);
   2636     return MHD_SC_DAEMON_SYS_DATA_BROKEN;
   2637   }
   2638 #endif /* MHD_SUPPORT_THREADS */
   2639 
   2640   if (daemon->net.listen.is_broken)
   2641   {
   2642     log_listen_broken (daemon);
   2643     return MHD_SC_DAEMON_SYS_DATA_BROKEN;
   2644   }
   2645 
   2646   if (NULL != next_max_wait)
   2647     *next_max_wait = mhd_daemon_get_wait_max (daemon);
   2648 
   2649   return MHD_SC_OK;
   2650 }
   2651 
   2652 
/* Public API entry point: process the registered external events for
   the daemon.  This is a thin wrapper around process_reg_events_int()
   that only adds optional call tracing. */
MHD_EXTERN_
MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
MHD_daemon_process_reg_events (struct MHD_Daemon *MHD_RESTRICT daemon,
                               uint_fast64_t *MHD_RESTRICT next_max_wait)
{
  enum MHD_StatusCode res;
#ifdef MHD_USE_TRACE_POLLING_FDS
  fprintf (stderr,
           "### (Starting) MHD_daemon_process_reg_events(daemon, [%s])...\n",
           (NULL != next_max_wait) ? "non-NULL" : "NULL");
#endif
  res = process_reg_events_int (daemon,
                                next_max_wait);
#ifdef MHD_USE_TRACE_POLLING_FDS
  /* Trace the result; '*next_max_wait' is dereferenced only after
     the NULL check */
  if (NULL == next_max_wait)
    fprintf (stderr,
             "### (Finished) MHD_daemon_process_reg_events(daemon, [NULL]) ->"
             "%u\n",
             (unsigned int) res);
  else if (MHD_WAIT_INDEFINITELY == *next_max_wait)
    fprintf (stderr,
             "### (Finished) MHD_daemon_process_reg_events(daemon, "
             "->MHD_WAIT_INDEFINITELY) ->%u\n",
             (unsigned int) res);
  else
    fprintf (stderr,
             "### (Finished) MHD_daemon_process_reg_events(daemon, ->%llu) "
             "->%u\n",
             (unsigned long long) *next_max_wait,
             (unsigned int) res);
#endif
  return res;
}
   2686 
   2687 
   2688 #ifdef MHD_SUPPORT_THREADS
   2689 
   2690 /**
   2691  * The entry point for the daemon worker thread
   2692  * @param cls the closure
   2693  */
   2694 mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
   2695 mhd_worker_all_events (void *cls)
   2696 {
   2697   struct MHD_Daemon *const restrict d = (struct MHD_Daemon *) cls;
   2698   mhd_thread_handle_ID_set_current_thread_ID (&(d->threading.tid));
   2699   mhd_assert (d->dbg.net_inited);
   2700   mhd_assert (! d->dbg.net_deinited);
   2701   mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
   2702   mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
   2703   mhd_assert (mhd_DAEMON_TYPE_LISTEN_ONLY != d->threading.d_type);
   2704   mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
   2705   mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION != d->wmode_int);
   2706   mhd_assert (d->dbg.events_fully_inited);
   2707   mhd_assert (d->dbg.connections_inited);
   2708 
   2709 #ifdef mhd_HAVE_MHD_THREAD_BLOCK_SIGPIPE
   2710   // TODO: store and use the result
   2711   (void) mhd_thread_block_sigpipe ();
   2712 #endif
   2713 
   2714   while (! d->threading.stop_requested)
   2715   {
   2716     if (! process_all_events_and_data (d))
   2717       break;
   2718   }
   2719   if (! d->threading.stop_requested)
   2720   {
   2721     mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_UNEXPECTED, \
   2722                  "The daemon thread is stopping, but termination has not " \
   2723                  "been requested for the daemon.");
   2724   }
   2725   mhd_daemon_close_all_conns (d);
   2726 
   2727 #ifdef MHD_SUPPORT_HTTPS
   2728   if (mhd_D_HAS_TLS (d))
   2729     mhd_tls_thread_cleanup (d->tls);
   2730 #endif /* MHD_SUPPORT_HTTPS */
   2731 
   2732   return (mhd_THRD_RTRN_TYPE) 0;
   2733 }
   2734 
   2735 
/**
 * Process events for the listening socket (and the inter-thread
 * communication channel) only; connections are not processed here.
 * Used by the "listening-only" daemon thread.
 * Note: not fully implemented yet (accepting is a TODO), so currently
 * this function always ends up returning 'false'.
 * @param d the daemon to use
 * @return 'true' on success,
 *         'false' on error or when the functionality is not implemented
 */
static MHD_FN_PAR_NONNULL_ (1) bool
process_listening_and_itc_only (struct MHD_Daemon *restrict d)
{
  /* Dummy first branch so that every backend branch below can be a
     uniform "else if" regardless of which backends are compiled in */
  if (false)
    (void) 0;
#ifdef MHD_SUPPORT_SELECT
  else if (mhd_POLL_TYPE_SELECT == d->events.poll_type)
  {
    return false; // TODO: implement
  }
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  else if (mhd_POLL_TYPE_POLL == d->events.poll_type)
  {
    /* 'true' limits polling to the listen FD and the ITC only */
    if (! get_all_net_updates_by_poll (d, true))
      return false;
  }
#endif /* MHD_SUPPORT_POLL */
  else
  {
    (void) d; /* Mute compiler warning */
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    MHD_PANIC ("Daemon data integrity broken");
  }
  // TODO: Accept connections
  return false;
}
   2764 
   2765 
   2766 /**
   2767  * The entry point for the daemon listening thread
   2768  * @param cls the closure
   2769  */
   2770 mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
   2771 mhd_worker_listening_only (void *cls)
   2772 {
   2773   struct MHD_Daemon *const restrict d = (struct MHD_Daemon *) cls;
   2774   mhd_thread_handle_ID_set_current_thread_ID (&(d->threading.tid));
   2775 
   2776   mhd_assert (d->dbg.net_inited);
   2777   mhd_assert (! d->dbg.net_deinited);
   2778   mhd_assert (mhd_DAEMON_TYPE_LISTEN_ONLY == d->threading.d_type);
   2779   mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION == d->wmode_int);
   2780   mhd_assert (d->dbg.events_fully_inited);
   2781   mhd_assert (d->dbg.connections_inited);
   2782 
   2783 #ifdef mhd_HAVE_MHD_THREAD_BLOCK_SIGPIPE
   2784   // TODO: store and use the result
   2785   (void) mhd_thread_block_sigpipe ();
   2786 #endif
   2787 
   2788   while (! d->threading.stop_requested)
   2789   {
   2790     if (! process_listening_and_itc_only (d))
   2791       break;
   2792   }
   2793   if (! d->threading.stop_requested)
   2794   {
   2795     mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_UNEXPECTED, \
   2796                  "The daemon thread is stopping, but termination has " \
   2797                  "not been requested by the daemon.");
   2798   }
   2799 
   2800 #ifdef MHD_SUPPORT_HTTPS
   2801   if (mhd_D_HAS_TLS (d))
   2802     mhd_tls_thread_cleanup (d->tls);
   2803 #endif /* MHD_SUPPORT_HTTPS */
   2804 
   2805   return (mhd_THRD_RTRN_TYPE) 0;
   2806 }
   2807 
   2808 
   2809 mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
   2810 mhd_worker_connection (void *cls)
   2811 {
   2812   if (cls) // TODO: Implement
   2813     MHD_PANIC ("Not yet implemented");
   2814 
   2815 #if 0 // def MHD_SUPPORT_HTTPS
   2816   if (mhd_D_HAS_TLS (d))
   2817     mhd_tls_thread_cleanup (d->tls);
   2818 #endif /* MHD_SUPPORT_HTTPS */
   2819 
   2820   return (mhd_THRD_RTRN_TYPE) 0;
   2821 }
   2822 
   2823 
   2824 #endif /* MHD_SUPPORT_THREADS */