summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
authorAnna Henningsen <anna@addaleax.net>2018-03-29 22:21:17 +0200
committerMyles Borins <mylesborins@google.com>2018-06-12 20:46:15 -0400
commit3217e8e66fa81e787b9f3b18c0c09235f050acee (patch)
tree412b9aeb694056e67dd43078bf3bcd34b0c47e34 /test
parent785e5ba48cb57a05c9c0966a502d34ac03084561 (diff)
downloadandroid-node-v8-3217e8e66fa81e787b9f3b18c0c09235f050acee.tar.gz
android-node-v8-3217e8e66fa81e787b9f3b18c0c09235f050acee.tar.bz2
android-node-v8-3217e8e66fa81e787b9f3b18c0c09235f050acee.zip
src: re-add `Realloc()` shrink after reading stream data
This would otherwise keep a lot of unused memory lying around, and in particular add up to a page per chunk of memory overhead for network reads, potentially opening a DoS vector if the resulting `Buffer` objects are kept around indefinitely (e.g. stored in a list and not concatenated until the socket finishes). This fixes CVE-2018-7164. Refs: https://github.com/nodejs-private/security/issues/186 Refs: https://github.com/nodejs/node/commit/7c4b09b24bbe7d6a8cbad256f47b30a101a909ea PR-URL: https://github.com/nodejs-private/node-private/pull/128 Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com> Reviewed-By: Evan Lucas <evanlucas@me.com>
Diffstat (limited to 'test')
-rw-r--r--test/sequential/test-net-bytes-per-incoming-chunk-overhead.js41
1 files changed, 41 insertions, 0 deletions
diff --git a/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js b/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js
new file mode 100644
index 0000000000..8f766e8c7a
--- /dev/null
+++ b/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js
@@ -0,0 +1,41 @@
+// Flags: --expose-gc
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const net = require('net');
+
// Tests that, when receiving small chunks, we do not keep the full length
// of the original allocation for the libuv read call in memory.

// Client socket; assigned once the server is listening (see listen callback).
let client;
// RSS snapshot taken when the server connection is established; baseline for
// the per-chunk overhead measurement in the 'exit' handler.
let baseRSS;
// Every received chunk is retained here so its backing allocation stays live
// until process exit — that is what makes the overhead measurable.
const receivedChunks = [];
// Number of one-byte round trips to perform before closing the connection.
const N = 250000;
+
// Server that ping-pongs one-byte writes with the client, retaining every
// chunk it receives so per-chunk memory overhead can be measured at exit.
const server = net.createServer(common.mustCall((socket) => {
  baseRSS = process.memoryUsage().rss;

  socket.setNoDelay(true);
  socket.on('data', (chunk) => {
    receivedChunks.push(chunk);
    if (receivedChunks.length >= N) {
      // Enough chunks collected — tear everything down.
      client.end();
      server.close();
      return;
    }
    client.write('a');
  });
})).listen(0, common.mustCall(() => {
  client = net.connect(server.address().port);
  client.setNoDelay(true);
  client.write('hello!');
}));
+
process.on('exit', () => {
  global.gc();
  // Average retained RSS growth per chunk; must stay well under a page
  // (usually ~4 kB) or the libuv read buffers were not shrunk.
  const rssGrowth = process.memoryUsage().rss - baseRSS;
  const bytesPerChunk = rssGrowth / receivedChunks.length;
  assert(bytesPerChunk < 512, `measured ${bytesPerChunk} bytes per chunk`);
});