author    Jamie Davis <davisjam@vt.edu>    2017-11-15 12:28:04 -0500
committer Ruben Bridgewater <ruben@bridgewater.de>    2018-02-01 11:11:48 +0100
commit    67a4ce1c6e230508ba307502e0937a63a7e07482 (patch)
tree      df7abfc81285a87eef07a4bc74ad504a4b2d8906 /test
parent    36fd25fa0572403ff32645611e1993de25422182 (diff)
fs: partition readFile against pool exhaustion
Problem: Node implements fs.readFile as:
- a call to stat, then
- a C++ -> libuv request to read the entire file using the stat size

Why is this bad? The effect is to place on the libuv threadpool a
potentially-large read request, occupying the libuv thread until it
completes. While readFile certainly requires buffering the entire file
contents, it can partition the read into smaller buffers (as is done on
other read paths) along the way to avoid threadpool exhaustion.

If the file is relatively large or stored on a slow medium, reading the
entire file in one shot seems particularly harmful, and presents a
possible DoS vector.

Solution: Partition the read into multiple smaller requests.

Considerations:

1. Correctness

I don't think partitioning the read like this raises any additional risk
of read-write races on the FS. If the application is concurrently
readFile'ing and modifying the file, it will already see funny behavior.
Though libuv uses preadv where available, this doesn't guarantee read
atomicity in the presence of concurrent writes.

2. Performance

Downside: Partitioning means that a single large readFile will be split
into many "out and back" requests to libuv, introducing overhead.
Upside: In between each "out and back", other work pending on the
threadpool can take a turn.

In short, although partitioning will slow down a large request, it will
lead to better throughput if the threadpool is handling more than one
type of request.

Fixes: https://github.com/nodejs/node/issues/17047
PR-URL: https://github.com/nodejs/node/pull/17054
Reviewed-By: Benjamin Gruenbaum <benjamingr@gmail.com>
Reviewed-By: Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Reviewed-By: Gireesh Punathil <gpunathi@in.ibm.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Sakthipriyan Vairamani <thechargingvolcano@gmail.com>
Reviewed-By: Ruben Bridgewater <ruben@bridgewater.de>
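To make the idea concrete, here is a minimal sketch of partitioned
reading, assuming a hypothetical 512 KiB chunk size. kChunkSize and
readFilePartitioned are invented names for illustration; the actual
patch applies the same idea inside fs.readFile itself rather than as a
separate helper:

    'use strict';
    const fs = require('fs');

    // Hypothetical chunk size; the real patch chooses its own constant.
    const kChunkSize = 512 * 1024;

    // Read the file in kChunkSize pieces so that no single libuv
    // request occupies a threadpool thread for the entire file.
    function readFilePartitioned(path, callback) {
      fs.open(path, 'r', (err, fd) => {
        if (err) return callback(err);
        fs.fstat(fd, (err, st) => {
          if (err) return fs.close(fd, () => callback(err));
          const buf = Buffer.allocUnsafe(st.size);
          let pos = 0;
          const readChunk = () => {
            const len = Math.min(kChunkSize, st.size - pos);
            if (len <= 0) // done; deliver the whole buffer
              return fs.close(fd, (err) => callback(err, buf));
            fs.read(fd, buf, pos, len, pos, (err, bytesRead) => {
              if (err) return fs.close(fd, () => callback(err));
              if (bytesRead === 0) // file shrank while we were reading
                return fs.close(fd,
                                (err) => callback(err, buf.slice(0, pos)));
              pos += bytesRead;
              readChunk(); // each chunk is a separate threadpool request
            });
          };
          readChunk();
        });
      });
    }

Between any two chunks, other requests pending on the threadpool
(crypto, dns, other fs calls) get a turn, which is the throughput upside
described above.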
Diffstat (limited to 'test')
-rw-r--r--  test/parallel/test-fs-readfile.js  |  58
1 file changed, 58 insertions(+), 0 deletions(-)
diff --git a/test/parallel/test-fs-readfile.js b/test/parallel/test-fs-readfile.js
new file mode 100644
index 0000000000..689f98ff84
--- /dev/null
+++ b/test/parallel/test-fs-readfile.js
@@ -0,0 +1,58 @@
+'use strict';
+const common = require('../common');
+
+// This test ensures that fs.readFile correctly returns the
+// contents of varying-sized files.
+
+const assert = require('assert');
+const fs = require('fs');
+const path = require('path');
+
+const prefix = `.removeme-fs-readfile-${process.pid}`;
+
+common.refreshTmpDir();
+
+const fileInfo = [
+ { name: path.join(common.tmpDir, `${prefix}-1K.txt`),
+ len: 1024,
+ },
+ { name: path.join(common.tmpDir, `${prefix}-64K.txt`),
+ len: 64 * 1024,
+ },
+ { name: path.join(common.tmpDir, `${prefix}-64KLessOne.txt`),
+ len: (64 * 1024) - 1,
+ },
+ { name: path.join(common.tmpDir, `${prefix}-1M.txt`),
+ len: 1 * 1024 * 1024,
+ },
+ { name: path.join(common.tmpDir, `${prefix}-1MPlusOne.txt`),
+ len: (1 * 1024 * 1024) + 1,
+ },
+];
+
+// Populate each fileInfo (and file) with unique fill.
+const sectorSize = 512;
+for (const e of fileInfo) {
+ e.contents = Buffer.allocUnsafe(e.len);
+
+ // Fill each sector with a distinct random byte so that bugs such as
+ // Node re-reading the same section twice, instead of two separate
+ // sections, would corrupt the expected contents; 'aa...aa' would not.
+ for (let offset = 0; offset < e.len; offset += sectorSize) {
+ const fillByte = 256 * Math.random(); // fill() coerces to an integer byte
+ const nBytesToFill = Math.min(sectorSize, e.len - offset);
+ e.contents.fill(fillByte, offset, offset + nBytesToFill);
+ }
+
+ fs.writeFileSync(e.name, e.contents);
+}
+// All files are now populated.
+
+// Test readFile on each size.
+for (const e of fileInfo) {
+ fs.readFile(e.name, common.mustCall((err, buf) => {
+ console.log(`Validating readFile on file ${e.name} of length ${e.len}`);
+ assert.ifError(err);
+ assert.deepStrictEqual(buf, e.contents, 'Incorrect file contents');
+ }));
+}
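For reference, the test runs standalone with
"node test/parallel/test-fs-readfile.js" from a built tree, or through
the harness with "python tools/test.py parallel/test-fs-readfile".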