author	Anna Henningsen <anna@addaleax.net>	2019-07-03 12:38:05 +0200
committer	Michaël Zasso <targos@protonmail.com>	2019-07-20 11:10:26 +0200
commit	03de3062817dcf826d65a0242cf50adc04255c3c (patch)
tree	8960c6421733f492d5938cff5d9e35e9a62cd9e7 /test
parent	aee86940f9ca31d09389579b2b2e5fac292e192a (diff)
zlib: do not coalesce multiple `.flush()` calls
This is an approach to address the issue linked below.

Previously, when `.write()` and `.flush()` calls to a zlib stream were
interleaved synchronously (i.e. without waiting for these operations to
finish), multiple flush calls would have been coalesced into a single
flushing operation.

This patch changes behaviour so that each `.flush()` call corresponds to
one flushing operation on the underlying zlib resource, and the order of
operations is as if the `.flush()` call were a `.write()` call.

One test had to be removed because it specifically tested the previous
behaviour.

As a drive-by fix, this also makes sure that all flush callbacks are
called. Previously, that was not the case.

Fixes: https://github.com/nodejs/node/issues/28478
PR-URL: https://github.com/nodejs/node/pull/28520
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Ruben Bridgewater <ruben@bridgewater.de>
Reviewed-By: Luigi Pinca <luigipinca@gmail.com>
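For illustration only, here is a minimal sketch (not part of this patch) of the synchronous write/flush interleaving the message describes, assuming the public `zlib` stream API. With this change, each `.flush()` call below flushes exactly the data written before it rather than being coalesced with the other flush.

'use strict';
// Illustrative sketch, not part of the patch.
const { createGzip, constants } = require('zlib');

const gzip = createGzip();

gzip.on('data', (chunk) => {
  // With uncoalesced flushes, output typically arrives once per flush,
  // covering exactly the writes that preceded that flush.
  console.log('compressed chunk:', chunk.length, 'bytes');
});

// Interleave writes and flushes synchronously, without waiting for their
// callbacks; previously the two flushes could collapse into one operation.
gzip.write('first');
gzip.flush(constants.Z_SYNC_FLUSH);
gzip.write('second');
gzip.flush(constants.Z_SYNC_FLUSH, () => gzip.end());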
Diffstat (limited to 'test')
-rw-r--r--	test/parallel/test-zlib-flush-multiple-scheduled.js	39
-rw-r--r--	test/parallel/test-zlib-flush-write-sync-interleaved.js	57
-rw-r--r--	test/parallel/test-zlib-write-after-flush.js	1
3 files changed, 57 insertions, 40 deletions
diff --git a/test/parallel/test-zlib-flush-multiple-scheduled.js b/test/parallel/test-zlib-flush-multiple-scheduled.js
deleted file mode 100644
index 0b752557e4..0000000000
--- a/test/parallel/test-zlib-flush-multiple-scheduled.js
+++ /dev/null
@@ -1,39 +0,0 @@
-'use strict';
-
-const common = require('../common');
-const assert = require('assert');
-const zlib = require('zlib');
-
-const {
- Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, Z_FULL_FLUSH, Z_FINISH
-} = zlib.constants;
-
-async function getOutput(...sequenceOfFlushes) {
- const zipper = zlib.createGzip({ highWaterMark: 16384 });
-
- zipper.write('A'.repeat(17000));
- for (const flush of sequenceOfFlushes) {
- zipper.flush(flush);
- }
-
- const data = [];
-
- return new Promise((resolve) => {
- zipper.on('data', common.mustCall((d) => {
- data.push(d);
- if (data.length === 2) resolve(Buffer.concat(data));
- }, 2));
- });
-}
-
-(async function() {
- assert.deepStrictEqual(await getOutput(Z_SYNC_FLUSH),
- await getOutput(Z_SYNC_FLUSH, Z_PARTIAL_FLUSH));
- assert.deepStrictEqual(await getOutput(Z_SYNC_FLUSH),
- await getOutput(Z_PARTIAL_FLUSH, Z_SYNC_FLUSH));
-
- assert.deepStrictEqual(await getOutput(Z_FINISH),
- await getOutput(Z_FULL_FLUSH, Z_FINISH));
- assert.deepStrictEqual(await getOutput(Z_FINISH),
- await getOutput(Z_SYNC_FLUSH, Z_FINISH));
-})();
diff --git a/test/parallel/test-zlib-flush-write-sync-interleaved.js b/test/parallel/test-zlib-flush-write-sync-interleaved.js
new file mode 100644
index 0000000000..9fed592a34
--- /dev/null
+++ b/test/parallel/test-zlib-flush-write-sync-interleaved.js
@@ -0,0 +1,57 @@
+'use strict';
+const common = require('../common');
+const assert = require('assert');
+const { createGzip, createGunzip, Z_PARTIAL_FLUSH } = require('zlib');
+
+// Verify that .flush() behaves like .write() in terms of ordering, e.g. in
+// a sequence like .write() + .flush() + .write() + .flush() each .flush() call
+// only affects the data written before it.
+// Refs: https://github.com/nodejs/node/issues/28478
+
+const compress = createGzip();
+const decompress = createGunzip();
+decompress.setEncoding('utf8');
+
+const events = [];
+const compressedChunks = [];
+
+for (const chunk of ['abc', 'def', 'ghi']) {
+ compress.write(chunk, common.mustCall(() => events.push({ written: chunk })));
+ compress.flush(Z_PARTIAL_FLUSH, common.mustCall(() => {
+ events.push('flushed');
+ const chunk = compress.read();
+ if (chunk !== null)
+ compressedChunks.push(chunk);
+ }));
+}
+
+compress.end(common.mustCall(() => {
+ events.push('compress end');
+ writeToDecompress();
+}));
+
+function writeToDecompress() {
+ // Write the compressed chunks to a decompressor, one by one, in order to
+ // verify that the flushes actually worked.
+ const chunk = compressedChunks.shift();
+ if (chunk === undefined) return decompress.end();
+ decompress.write(chunk, common.mustCall(() => {
+ events.push({ read: decompress.read() });
+ writeToDecompress();
+ }));
+}
+
+process.on('exit', () => {
+ assert.deepStrictEqual(events, [
+ { written: 'abc' },
+ 'flushed',
+ { written: 'def' },
+ 'flushed',
+ { written: 'ghi' },
+ 'flushed',
+ 'compress end',
+ { read: 'abc' },
+ { read: 'def' },
+ { read: 'ghi' }
+ ]);
+});
diff --git a/test/parallel/test-zlib-write-after-flush.js b/test/parallel/test-zlib-write-after-flush.js
index 2fcae2a213..6edcae2e2f 100644
--- a/test/parallel/test-zlib-write-after-flush.js
+++ b/test/parallel/test-zlib-write-after-flush.js
@@ -39,7 +39,6 @@ for (const [ createCompress, createDecompress ] of [
gunz.on('data', (c) => output += c);
gunz.on('end', common.mustCall(() => {
assert.strictEqual(output, input);
- assert.strictEqual(gzip._nextFlush, -1);
}));
// Make sure that flush/write doesn't trigger an assert failure