commit 5f6da77189e35f9eca129974082e57ffd997fe04
parent 700c4299dd54c41ce045c225f4dac79647090f26
Author: Christian Grothoff <christian@grothoff.org>
Date: Thu, 23 Apr 2026 22:54:02 +0200
fail oversized uploads early, before sending 100 Continue
Diffstat:
3 files changed, 112 insertions(+), 3 deletions(-)
diff --git a/src/backend/paivana-httpd.h b/src/backend/paivana-httpd.h
@@ -74,5 +74,13 @@ extern int PH_global_ret;
*/
extern const struct GNUNET_CONFIGURATION_Handle *PH_cfg;
+/**
+ * Maximum size (in bytes) of a request body that we will buffer
+ * before forwarding it upstream. Requests exceeding this are
+ * rejected with HTTP 413. Settable via the `-u` / `--max-upload`
+ * command-line option; defaults to 1 MiB.
+ */
+extern unsigned long long PH_request_buffer_max;
+
#endif
diff --git a/src/backend/paivana-httpd_reverse.c b/src/backend/paivana-httpd_reverse.c
@@ -1054,10 +1054,42 @@ PAIVANA_HTTPD_reverse (struct HttpRequest *hr,
nowhere to land. */
if (! hr->accepted)
{
- /* FIXME: we should check that the content-length header value is
- acceptable here to not send the client 100 continue it it is
- too big */
+ /* If the client declared a Content-Length we already know is too
+ large, reject now: MHD is in HEADERS_PROCESSED so we can queue
+ a response, and rejecting here suppresses the implicit 100
+ Continue (RFC 7231 §5.1.1) for clients that asked for one and
+ avoids buffering the body just to throw it away. Chunked
+ uploads (no Content-Length) and clients that ignore 100
+ Continue and stream the body anyway still hit the
+ drain-then-reject path further down. */
+ const char *cl_str
+ = MHD_lookup_connection_value (con,
+ MHD_HEADER_KIND,
+ MHD_HTTP_HEADER_CONTENT_LENGTH);
hr->accepted = true;
+ if (NULL != cl_str)
+ {
+ char *endptr;
+ unsigned long long cl;
+
+ errno = 0;
+ cl = strtoull (cl_str,
+ &endptr,
+ 10);
+ if ( (0 == errno) &&
+ ('\0' == *endptr) &&
+ (cl > PH_request_buffer_max) )
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+ "Rejecting upload: Content-Length %llu exceeds %llu byte limit\n",
+ cl,
+ PH_request_buffer_max);
+ hr->reject_upload = true;
+ return MHD_queue_response (con,
+ MHD_HTTP_CONTENT_TOO_LARGE,
+ curl_failure_response);
+ }
+ }
return MHD_YES;
}
/* On the "final" access-handler call after we drained an
diff --git a/src/tests/test_reverse_proxy.sh b/src/tests/test_reverse_proxy.sh
@@ -399,6 +399,72 @@ function test_upload_too_big() {
ok
}
+function test_upload_too_big_early() {
+ # Open a raw TCP connection and send a POST whose Content-Length
+ # already exceeds the buffer cap, but DON'T send any body bytes.
+ # Paivana must reject on the Content-Length header alone (during
+ # MHD's HEADERS_PROCESSED callback), respond with 413 and close
+ # the connection. If the early-reject path is missing the server
+ # would block waiting for a body that never arrives and we'd hit
+ # the timeout, which fails the test rather than masquerading as
+ # a pass.
+ msg "Content-Length exceeding 1 MiB triggers early 413 (no body sent)"
+ local out
+ out="$( ( exec 3<>"/dev/tcp/127.0.0.1/$PAIVANA_PORT"
+ printf 'POST /upload HTTP/1.1\r\nHost: 127.0.0.1:%s\r\nContent-Length: 10485760\r\nConnection: close\r\n\r\n' \
+ "$PAIVANA_PORT" >&3
+ timeout 5 cat <&3 ) 2>"$TMPDIR/err")" \
+ || fail "raw POST failed (timeout or socket error): $(cat "$TMPDIR/err")"
+ case "$out" in
+ 'HTTP/1.1 413'*) ;;
+ *) fail "expected 413 status line, got: $(echo "$out" | head -c 80)";;
+ esac
+ ok
+}
+
+function test_upload_too_big_no_continue() {
+ # When the client opts in to 100-continue, paivana must NOT send
+ # the interim 100 response if it has already decided to reject
+ # the upload — it should jump straight to 413. Use curl with
+ # `Expect: 100-continue` and verbose tracing, then assert that
+ # no `< HTTP/1.1 100` line appeared on the wire.
+ msg "rejection suppresses 100 Continue when client opts in"
+ dd if=/dev/zero of="$TMPDIR/big" bs=1024 count=2048 status=none
+ local status
+ status="$(curl -sSv -X POST -H 'Expect: 100-continue' \
+ --expect100-timeout 5 \
+ --data-binary "@$TMPDIR/big" \
+ -o /dev/null -w '%{http_code}' \
+ "$(PAIVANA_URL /upload)" 2>"$TMPDIR/trace")" \
+ || fail "curl: $(cat "$TMPDIR/trace")"
+ [ "$status" = "413" ] || fail "status=$status want=413"
+ if grep -q '^< HTTP/1.1 100' "$TMPDIR/trace";
+ then
+ fail "server sent 100 Continue before 413; trace: $(grep '^<' "$TMPDIR/trace")"
+ fi
+ ok
+}
+
+function test_upload_too_big_chunked() {
+ # Chunked transfer-encoding has no Content-Length, so paivana
+ # cannot know the upload is too big until it actually reaches
+ # the cap mid-stream. This test guards the fallback
+ # drain-then-reject path that runs in BODY_RECEIVING /
+ # FULL_REQ_RECEIVED.
+ msg "chunked upload exceeding 1 MiB still yields 413 (drain path)"
+ dd if=/dev/zero of="$TMPDIR/big" bs=1024 count=2048 status=none
+ local status
+ status="$(curl -sS -X POST \
+ -H 'Transfer-Encoding: chunked' \
+ -H 'Content-Length:' \
+ --data-binary "@$TMPDIR/big" \
+ -o /dev/null -w '%{http_code}' \
+ "$(PAIVANA_URL /upload)" 2>"$TMPDIR/err")" \
+ || fail "curl: $(cat "$TMPDIR/err")"
+ [ "$status" = "413" ] || fail "status=$status want=413"
+ ok
+}
+
function test_upstream_down() {
msg "upstream down yields 502 Bad Gateway"
# Re-point paivana at a port with nothing listening.
@@ -491,6 +557,9 @@ run_battery "mhd"
test_method_not_allowed
test_upload_too_big
+test_upload_too_big_early
+test_upload_too_big_no_continue
+test_upload_too_big_chunked
test_keepalive_curl
test_wget_basic
test_pipelined