about summary refs log tree commit diff
path: root/test
diff options
context:
space:
mode:
authorBen Noordhuis <info@bnoordhuis.nl>2015-11-03 19:06:50 +0100
committerBen Noordhuis <info@bnoordhuis.nl>2015-11-06 23:02:11 +0100
commit33827e3f6864872f357da6f9566f3c90b138c4ef (patch)
tree048e8c57bada24f1258754f90286cc2ba129b147 /test
parentdf9c4a6a1fe42971e439701a70da61b1155ac57f (diff)
downloadandroid-node-v8-33827e3f6864872f357da6f9566f3c90b138c4ef.tar.gz
android-node-v8-33827e3f6864872f357da6f9566f3c90b138c4ef.tar.bz2
android-node-v8-33827e3f6864872f357da6f9566f3c90b138c4ef.zip
cluster: remove handles when disconnecting worker
Due to the race window between the master's "disconnect" message and the worker's "handle received" message, connections sometimes got stuck in the pending handles queue when calling `worker.disconnect()` in the master process. The observable effect from the client's perspective was a TCP or HTTP connection that simply stalled. This commit fixes that by closing open handles in the master when the "disconnect" message is sent. Fixes: https://github.com/nodejs/node/issues/3551 PR-URL: https://github.com/nodejs/node/pull/3677 Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: Fedor Indutny <fedor@indutny.com> Reviewed-By: James M Snell <jasnell@gmail.com>
Diffstat (limited to 'test')
-rw-r--r--test/parallel/test-cluster-disconnect-handles.js65
1 file changed, 65 insertions, 0 deletions
diff --git a/test/parallel/test-cluster-disconnect-handles.js b/test/parallel/test-cluster-disconnect-handles.js
new file mode 100644
index 0000000000..5fa9884453
--- /dev/null
+++ b/test/parallel/test-cluster-disconnect-handles.js
@@ -0,0 +1,65 @@
+/* eslint-disable no-debugger */
+// Flags: --expose_internals
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const cluster = require('cluster');
+const net = require('net');
+
+const Protocol = require('_debugger').Protocol;
+
+if (common.isWindows) {
+ console.log('1..0 # Skipped: SCHED_RR not reliable on Windows');
+ return;
+}
+
+cluster.schedulingPolicy = cluster.SCHED_RR;
+
+// Worker sends back a "I'm here" message, then immediately suspends
+// inside the debugger. The master connects to the debug agent first,
+// connects to the TCP server second, then disconnects the worker and
+// unsuspends it again. The ultimate goal of this tortured exercise
+// is to make sure the connection is still sitting in the master's
+// pending handle queue.
+if (cluster.isMaster) {
+ const handles = require('internal/cluster').handles;
+ // FIXME(bnoordhuis) lib/cluster.js scans the execArgv arguments for
+ // debugger flags and renumbers any port numbers it sees starting
+ // from the default port 5858. Add a '.' that circumvents the
+ // scanner but is ignored by atoi(3). Heinous hack.
+ cluster.setupMaster({ execArgv: [`--debug=${common.PORT}.`] });
+ const worker = cluster.fork();
+ worker.on('message', common.mustCall(message => {
+ assert.strictEqual(Array.isArray(message), true);
+ assert.strictEqual(message[0], 'listening');
+ const address = message[1];
+ const host = address.address;
+ const debugClient = net.connect({ host, port: common.PORT });
+ const protocol = new Protocol();
+ debugClient.setEncoding('utf8');
+ debugClient.on('data', data => protocol.execute(data));
+ debugClient.once('connect', common.mustCall(() => {
+ protocol.onResponse = common.mustCall(res => {
+ protocol.onResponse = () => {};
+ const conn = net.connect({ host, port: address.port });
+ conn.once('connect', common.mustCall(() => {
+ conn.destroy();
+ assert.notDeepStrictEqual(handles, {});
+ worker.disconnect();
+ assert.deepStrictEqual(handles, {});
+ const req = protocol.serialize({ command: 'continue' });
+ debugClient.write(req);
+ }));
+ });
+ }));
+ }));
+ process.on('exit', () => assert.deepStrictEqual(handles, {}));
+} else {
+ const server = net.createServer(socket => socket.pipe(socket));
+ server.listen(() => {
+ process.send(['listening', server.address()]);
+ debugger;
+ });
+ process.on('disconnect', process.exit);
+}