diff options
author | Gireesh Punathil <gpunathi@in.ibm.com> | 2018-09-26 02:46:48 -0400 |
---|---|---|
committer | Gireesh Punathil <gpunathi@in.ibm.com> | 2018-12-30 19:45:57 +0530 |
commit | 54fa59c8bf441e4e8c187150f7ffe11680e9ccba (patch) | |
tree | 941fbd09214ad0a2151dabe926b0dcb73f24c78a /test | |
parent | 903630e72ece60c764b9cac9d0941400377b7ac6 (diff) | |
download | android-node-v8-54fa59c8bf441e4e8c187150f7ffe11680e9ccba.tar.gz android-node-v8-54fa59c8bf441e4e8c187150f7ffe11680e9ccba.tar.bz2 android-node-v8-54fa59c8bf441e4e8c187150f7ffe11680e9ccba.zip |
test: regression test for uv threadpool congestion
Validate that massive dns lookups do not block filesystem I/O
(or any fast I/O for that matter).
Prior to https://github.com/libuv/libuv/pull/1845 a few back-to-back dns
lookups were sufficient to engage libuv threadpool workers in a blocking
manner, throttling other work items that need the pool. This test acts
as a regression test for the same.
Start slow and fast I/Os together, and make sure the fast I/O can
complete within at most 1/100th of the time taken by the slow I/O.
Refs: https://github.com/libuv/libuv/pull/1845
Refs: https://github.com/nodejs/node/issues/8436
PR-URL: https://github.com/nodejs/node/pull/23099
Reviewed-By: Sakthipriyan Vairamani <thechargingvolcano@gmail.com>
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Diffstat (limited to 'test')
-rw-r--r-- | test/internet/test-uv-threadpool-schedule.js | 65 |
1 files changed, 65 insertions, 0 deletions
'use strict';

// Test to validate that massive dns lookups do not block filesystem I/O
// (or any fast I/O). Prior to https://github.com/libuv/libuv/pull/1845
// a few back-to-back dns lookups were sufficient to engage libuv
// threadpool workers in a blocking manner, throttling other work items
// that need pool resources. Start slow and fast I/Os together, and make
// sure the fast I/O can complete within at most 1/100th of the time
// taken by the slow I/O.
// TEST TIME TO COMPLETION: ~5 seconds.

const common = require('../common');
const dns = require('dns');
const fs = require('fs');
const assert = require('assert');

const start = Date.now();

const slowIOmax = 100;
let slowIOcount = 0;
let fastIOdone = false;
let slowIOend, fastIOend;

// Completion callback for each dns.lookup. Once the final (slowIOmax-th)
// lookup finishes, assert that the fast file read was not starved by
// threadpool congestion.
function onResolve() {
  slowIOcount++;
  if (slowIOcount === slowIOmax) {
    slowIOend = Date.now();

    // Conservative expectation: finish disc I/O
    // at least by when the net I/O completes.
    assert.ok(fastIOdone,
              'fast I/O was throttled due to threadpool congestion.');

    // More realistic expectation: finish disc I/O at least within
    // a time duration that is 1/100th of net I/O.
    // Ideally the slow I/O should not affect the fast I/O as those
    // have two different thread-pool buckets. However, this could be
    // highly load / platform dependent, so don't be very greedy.
    const fastIOtime = fastIOend - start;
    const slowIOtime = slowIOend - start;
    const expectedMax = slowIOtime / 100;
    assert.ok(fastIOtime < expectedMax,
              'fast I/O took longer to complete, ' +
              `actual: ${fastIOtime}, expected: ${expectedMax}`);
  }
}


// Slow I/O: a burst of DNS lookups that occupies threadpool workers.
for (let i = 0; i < slowIOmax; i++) {
  // We need to refresh the domain string every time, otherwise a
  // cached result for the previous lookup is returned from memory,
  // breaking all our math.
  dns.lookup(`${randomDomain()}.com`, {}, common.mustCall(onResolve));
}

// Fast I/O: a small file read that should complete quickly even while
// the pool is busy with lookups.
fs.readFile(__filename, common.mustCall(() => {
  fastIOend = Date.now();
  fastIOdone = true;
}));

// Produce a random 10-character lowercase (a-z) domain label, so each
// dns.lookup above resolves a fresh, uncached name.
function randomDomain() {
  const d = Buffer.alloc(10);
  for (let i = 0; i < 10; i++)
    d[i] = 97 + (Math.round(Math.random() * 13247)) % 26;
  return d.toString();
}