summaryrefslogtreecommitdiff
path: root/deps/v8/tools/testrunner/testproc
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/tools/testrunner/testproc')
-rw-r--r--deps/v8/tools/testrunner/testproc/base.py51
-rw-r--r--deps/v8/tools/testrunner/testproc/combiner.py124
-rw-r--r--deps/v8/tools/testrunner/testproc/execution.py65
-rw-r--r--deps/v8/tools/testrunner/testproc/expectation.py27
-rw-r--r--deps/v8/tools/testrunner/testproc/fuzzer.py287
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py26
-rw-r--r--deps/v8/tools/testrunner/testproc/rerun.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/seed.py58
-rw-r--r--deps/v8/tools/testrunner/testproc/sigproc.py31
-rw-r--r--deps/v8/tools/testrunner/testproc/timeout.py28
10 files changed, 628 insertions, 71 deletions
diff --git a/deps/v8/tools/testrunner/testproc/base.py b/deps/v8/tools/testrunner/testproc/base.py
index 1a87dbed55..5cb1182e89 100644
--- a/deps/v8/tools/testrunner/testproc/base.py
+++ b/deps/v8/tools/testrunner/testproc/base.py
@@ -37,36 +37,12 @@ DROP_OUTPUT = 1
DROP_PASS_OUTPUT = 2
DROP_PASS_STDOUT = 3
-def get_reduce_result_function(requirement):
- if requirement == DROP_RESULT:
- return lambda _: None
-
- if requirement == DROP_OUTPUT:
- def f(result):
- result.output = None
- return result
- return f
-
- if requirement == DROP_PASS_OUTPUT:
- def f(result):
- if not result.has_unexpected_output:
- result.output = None
- return result
- return f
-
- if requirement == DROP_PASS_STDOUT:
- def f(result):
- if not result.has_unexpected_output:
- result.output.stdout = None
- result.output.stderr = None
- return result
- return f
-
class TestProc(object):
def __init__(self):
self._prev_proc = None
self._next_proc = None
+ self._stopped = False
self._requirement = DROP_RESULT
self._prev_requirement = None
self._reduce_result = lambda result: result
@@ -90,8 +66,14 @@ class TestProc(object):
self._prev_requirement = requirement
if self._next_proc:
self._next_proc.setup(max(requirement, self._requirement))
- if self._prev_requirement < self._requirement:
- self._reduce_result = get_reduce_result_function(self._prev_requirement)
+
+  # Since we're not winning anything by dropping part of the result we are
+  # dropping the whole result or passing it as it is. The real reduction happens
+ # during result creation (in the output processor), so the result is
+ # immutable.
+ if (self._prev_requirement < self._requirement and
+ self._prev_requirement == DROP_RESULT):
+ self._reduce_result = lambda _: None
def next_test(self, test):
"""
@@ -111,6 +93,18 @@ class TestProc(object):
if self._prev_proc:
self._prev_proc.heartbeat()
+ def stop(self):
+ if not self._stopped:
+ self._stopped = True
+ if self._prev_proc:
+ self._prev_proc.stop()
+ if self._next_proc:
+ self._next_proc.stop()
+
+ @property
+ def is_stopped(self):
+ return self._stopped
+
### Communication
def _send_test(self, test):
@@ -119,7 +113,8 @@ class TestProc(object):
def _send_result(self, test, result):
"""Helper method for sending result to the previous processor."""
- result = self._reduce_result(result)
+ if not test.keep_output:
+ result = self._reduce_result(result)
self._prev_proc.result_for(test, result)
diff --git a/deps/v8/tools/testrunner/testproc/combiner.py b/deps/v8/tools/testrunner/testproc/combiner.py
new file mode 100644
index 0000000000..50944e1e5e
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/combiner.py
@@ -0,0 +1,124 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import defaultdict
+import time
+
+from . import base
+from ..objects import testcase
+from ..outproc import base as outproc
+
+
+class CombinerProc(base.TestProc):
+ def __init__(self, rng, min_group_size, max_group_size, count):
+ """
+ Args:
+ rng: random number generator
+ min_group_size: minimum number of tests to combine
+ max_group_size: maximum number of tests to combine
+ count: how many tests to generate. 0 means infinite running
+ """
+ super(CombinerProc, self).__init__()
+
+ self._rng = rng
+ self._min_size = min_group_size
+ self._max_size = max_group_size
+ self._count = count
+
+ # Index of the last generated test
+ self._current_num = 0
+
+ # {suite name: instance of TestGroups}
+ self._groups = defaultdict(TestGroups)
+
+ # {suite name: instance of TestCombiner}
+ self._combiners = {}
+
+ def setup(self, requirement=base.DROP_RESULT):
+ # Combiner is not able to pass results (even as None) to the previous
+ # processor.
+ assert requirement == base.DROP_RESULT
+ self._next_proc.setup(base.DROP_RESULT)
+
+ def next_test(self, test):
+ group_key = self._get_group_key(test)
+ if not group_key:
+ # Test not suitable for combining
+ return
+
+ self._groups[test.suite.name].add_test(group_key, test)
+
+ def _get_group_key(self, test):
+ combiner = self._get_combiner(test.suite)
+ if not combiner:
+ print ('>>> Warning: There is no combiner for %s testsuite' %
+ test.suite.name)
+ return None
+ return combiner.get_group_key(test)
+
+ def result_for(self, test, result):
+ self._send_next_test()
+
+ def generate_initial_tests(self, num=1):
+ for _ in xrange(0, num):
+ self._send_next_test()
+
+ def _send_next_test(self):
+ if self.is_stopped:
+ return
+
+ if self._count and self._current_num >= self._count:
+ return
+
+ combined_test = self._create_new_test()
+ if not combined_test:
+ # Not enough tests
+ return
+
+ self._send_test(combined_test)
+
+ def _create_new_test(self):
+ suite, combiner = self._select_suite()
+ groups = self._groups[suite]
+
+ max_size = self._rng.randint(self._min_size, self._max_size)
+ sample = groups.sample(self._rng, max_size)
+ if not sample:
+ return None
+
+ self._current_num += 1
+ return combiner.combine('%s-%d' % (suite, self._current_num), sample)
+
+ def _select_suite(self):
+ """Returns pair (suite name, combiner)."""
+ selected = self._rng.randint(0, len(self._groups) - 1)
+ for n, suite in enumerate(self._groups):
+ if n == selected:
+ return suite, self._combiners[suite]
+
+ def _get_combiner(self, suite):
+ combiner = self._combiners.get(suite.name)
+ if not combiner:
+ combiner = suite.get_test_combiner()
+ self._combiners[suite.name] = combiner
+ return combiner
+
+
+class TestGroups(object):
+ def __init__(self):
+ self._groups = defaultdict(list)
+ self._keys = []
+
+ def add_test(self, key, test):
+ self._groups[key].append(test)
+ self._keys.append(key)
+
+ def sample(self, rng, max_size):
+ # Not enough tests
+ if not self._groups:
+ return None
+
+ group_key = rng.choice(self._keys)
+ tests = self._groups[group_key]
+ return [rng.choice(tests) for _ in xrange(0, max_size)]
diff --git a/deps/v8/tools/testrunner/testproc/execution.py b/deps/v8/tools/testrunner/testproc/execution.py
index 021b02af3e..2d1ea02cd0 100644
--- a/deps/v8/tools/testrunner/testproc/execution.py
+++ b/deps/v8/tools/testrunner/testproc/execution.py
@@ -15,12 +15,12 @@ def run_job(job, process_context):
return job.run(process_context)
-def create_process_context(requirement):
- return ProcessContext(base.get_reduce_result_function(requirement))
+def create_process_context(result_reduction):
+ return ProcessContext(result_reduction)
JobResult = collections.namedtuple('JobResult', ['id', 'result'])
-ProcessContext = collections.namedtuple('ProcessContext', ['reduce_result_f'])
+ProcessContext = collections.namedtuple('ProcessContext', ['result_reduction'])
class Job(object):
@@ -32,9 +32,8 @@ class Job(object):
def run(self, process_ctx):
output = self.cmd.execute()
- result = self.outproc.process(output)
- if not self.keep_output:
- result = process_ctx.reduce_result_f(result)
+ reduction = process_ctx.result_reduction if not self.keep_output else None
+ result = self.outproc.process(output, reduction)
return JobResult(self.test_id, result)
@@ -44,49 +43,51 @@ class ExecutionProc(base.TestProc):
sends results to the previous processor.
"""
- def __init__(self, jobs, context):
+ def __init__(self, jobs, outproc_factory=None):
super(ExecutionProc, self).__init__()
self._pool = pool.Pool(jobs)
- self._context = context
+ self._outproc_factory = outproc_factory or (lambda t: t.output_proc)
self._tests = {}
def connect_to(self, next_proc):
assert False, 'ExecutionProc cannot be connected to anything'
- def start(self):
- try:
- it = self._pool.imap_unordered(
+ def run(self):
+ it = self._pool.imap_unordered(
fn=run_job,
gen=[],
process_context_fn=create_process_context,
process_context_args=[self._prev_requirement],
- )
- for pool_result in it:
- if pool_result.heartbeat:
- continue
-
- job_result = pool_result.value
- test_id, result = job_result
-
- test, result.cmd = self._tests[test_id]
- del self._tests[test_id]
- self._send_result(test, result)
- except KeyboardInterrupt:
- raise
- except:
- traceback.print_exc()
- raise
- finally:
- self._pool.terminate()
+ )
+ for pool_result in it:
+ self._unpack_result(pool_result)
def next_test(self, test):
+ if self.is_stopped:
+ return
+
test_id = test.procid
- cmd = test.get_command(self._context)
+ cmd = test.get_command()
self._tests[test_id] = test, cmd
- # TODO(majeski): Needs factory for outproc as in local/execution.py
- outproc = test.output_proc
+ outproc = self._outproc_factory(test)
self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
def result_for(self, test, result):
assert False, 'ExecutionProc cannot receive results'
+
+ def stop(self):
+ super(ExecutionProc, self).stop()
+ self._pool.abort()
+
+ def _unpack_result(self, pool_result):
+ if pool_result.heartbeat:
+ self.heartbeat()
+ return
+
+ job_result = pool_result.value
+ test_id, result = job_result
+
+ test, result.cmd = self._tests[test_id]
+ del self._tests[test_id]
+ self._send_result(test, result)
diff --git a/deps/v8/tools/testrunner/testproc/expectation.py b/deps/v8/tools/testrunner/testproc/expectation.py
new file mode 100644
index 0000000000..607c010cf3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/expectation.py
@@ -0,0 +1,27 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+from testrunner.local import statusfile
+from testrunner.outproc import base as outproc
+
+class ForgiveTimeoutProc(base.TestProcProducer):
+ """Test processor passing tests and results through and forgiving timeouts."""
+ def __init__(self):
+ super(ForgiveTimeoutProc, self).__init__('no-timeout')
+
+ def _next_test(self, test):
+ subtest = self._create_subtest(test, 'no_timeout')
+ if subtest.expected_outcomes == outproc.OUTCOMES_PASS:
+ subtest.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+ elif subtest.expected_outcomes == outproc.OUTCOMES_FAIL:
+ subtest.expected_outcomes = outproc.OUTCOMES_FAIL_OR_TIMEOUT
+ elif statusfile.TIMEOUT not in subtest.expected_outcomes:
+ subtest.expected_outcomes = (
+ subtest.expected_outcomes + [statusfile.TIMEOUT])
+ self._send_test(subtest)
+
+ def _result_for(self, test, subtest, result):
+ self._send_result(test, result)
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
new file mode 100644
index 0000000000..624b9aac04
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -0,0 +1,287 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import namedtuple
+import time
+
+from . import base
+
+
+class FuzzerConfig(object):
+ def __init__(self, probability, analyzer, fuzzer):
+ """
+ Args:
+ probability: of choosing this fuzzer (0; 10]
+ analyzer: instance of Analyzer class, can be None if no analysis is needed
+ fuzzer: instance of Fuzzer class
+ """
+ assert probability > 0 and probability <= 10
+
+ self.probability = probability
+ self.analyzer = analyzer
+ self.fuzzer = fuzzer
+
+
+class Analyzer(object):
+ def get_analysis_flags(self):
+ raise NotImplementedError()
+
+ def do_analysis(self, result):
+ raise NotImplementedError()
+
+
+class Fuzzer(object):
+ def create_flags_generator(self, rng, test, analysis_value):
+ """
+ Args:
+ rng: random number generator
+ test: test for which to create flags
+ analysis_value: value returned by the analyzer. None if there is no
+ corresponding analyzer to this fuzzer or the analysis phase is disabled
+ """
+ raise NotImplementedError()
+
+
+# TODO(majeski): Allow multiple subtests to run at once.
+class FuzzerProc(base.TestProcProducer):
+ def __init__(self, rng, count, fuzzers, disable_analysis=False):
+ """
+ Args:
+ rng: random number generator used to select flags and values for them
+ count: number of tests to generate based on each base test
+ fuzzers: list of FuzzerConfig instances
+    disable_analysis: disable analysis phase and filtering based on it. When
+ set, processor passes None as analysis result to fuzzers
+ """
+ super(FuzzerProc, self).__init__('Fuzzer')
+
+ self._rng = rng
+ self._count = count
+ self._fuzzer_configs = fuzzers
+ self._disable_analysis = disable_analysis
+ self._gens = {}
+
+ def setup(self, requirement=base.DROP_RESULT):
+ # Fuzzer is optimized to not store the results
+ assert requirement == base.DROP_RESULT
+ super(FuzzerProc, self).setup(requirement)
+
+ def _next_test(self, test):
+ if self.is_stopped:
+ return
+
+ analysis_subtest = self._create_analysis_subtest(test)
+ if analysis_subtest:
+ self._send_test(analysis_subtest)
+ else:
+ self._gens[test.procid] = self._create_gen(test)
+ self._try_send_next_test(test)
+
+ def _create_analysis_subtest(self, test):
+ if self._disable_analysis:
+ return None
+
+ analysis_flags = []
+ for fuzzer_config in self._fuzzer_configs:
+ if fuzzer_config.analyzer:
+ analysis_flags += fuzzer_config.analyzer.get_analysis_flags()
+
+ if analysis_flags:
+ analysis_flags = list(set(analysis_flags))
+ return self._create_subtest(test, 'analysis', flags=analysis_flags,
+ keep_output=True)
+
+
+ def _result_for(self, test, subtest, result):
+ if not self._disable_analysis:
+ if result is not None:
+ # Analysis phase, for fuzzing we drop the result.
+ if result.has_unexpected_output:
+ self._send_result(test, None)
+ return
+ self._gens[test.procid] = self._create_gen(test, result)
+
+ self._try_send_next_test(test)
+
+ def _create_gen(self, test, analysis_result=None):
+ # It will be called with analysis_result==None only when there is no
+  # analysis phase at all, so no fuzzer has its own analyzer.
+ gens = []
+ indexes = []
+ for i, fuzzer_config in enumerate(self._fuzzer_configs):
+ analysis_value = None
+ if analysis_result and fuzzer_config.analyzer:
+ analysis_value = fuzzer_config.analyzer.do_analysis(analysis_result)
+ if not analysis_value:
+ # Skip fuzzer for this test since it doesn't have analysis data
+ continue
+ p = fuzzer_config.probability
+ flag_gen = fuzzer_config.fuzzer.create_flags_generator(self._rng, test,
+ analysis_value)
+ indexes += [len(gens)] * p
+ gens.append((p, flag_gen))
+
+ if not gens:
+ # No fuzzers for this test, skip it
+ return
+
+ i = 0
+ while not self._count or i < self._count:
+ main_index = self._rng.choice(indexes)
+ _, main_gen = gens[main_index]
+
+ flags = next(main_gen)
+ for index, (p, gen) in enumerate(gens):
+ if index == main_index:
+ continue
+ if self._rng.randint(1, 10) <= p:
+ flags += next(gen)
+
+ flags.append('--fuzzer-random-seed=%s' % self._next_seed())
+ yield self._create_subtest(test, str(i), flags=flags)
+
+ i += 1
+
+ def _try_send_next_test(self, test):
+ if not self.is_stopped:
+ for subtest in self._gens[test.procid]:
+ self._send_test(subtest)
+ return
+
+ del self._gens[test.procid]
+ self._send_result(test, None)
+
+ def _next_seed(self):
+ seed = None
+ while not seed:
+ seed = self._rng.randint(-2147483648, 2147483647)
+ return seed
+
+
+class ScavengeAnalyzer(Analyzer):
+ def get_analysis_flags(self):
+ return ['--fuzzer-gc-analysis']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('### Maximum new space size reached = '):
+ return int(float(line.split()[7]))
+
+
+class ScavengeFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stress-scavenge=%d' % (analysis_value or 100)]
+
+
+class MarkingAnalyzer(Analyzer):
+ def get_analysis_flags(self):
+ return ['--fuzzer-gc-analysis']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('### Maximum marking limit reached = '):
+ return int(float(line.split()[6]))
+
+
+class MarkingFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stress-marking=%d' % (analysis_value or 100)]
+
+
+class GcIntervalAnalyzer(Analyzer):
+ def get_analysis_flags(self):
+ return ['--fuzzer-gc-analysis']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('### Allocations = '):
+ return int(float(line.split()[3][:-1]))
+
+
+class GcIntervalFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ if analysis_value:
+ value = analysis_value / 10
+ else:
+ value = 10000
+ while True:
+ yield ['--random-gc-interval=%d' % value]
+
+
+class CompactionFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stress-compaction-random']
+
+
+class ThreadPoolSizeFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--thread-pool-size=%d' % rng.randint(1, 8)]
+
+
+class InterruptBudgetFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ limit = 1 + int(rng.random() * 144)
+ yield ['--interrupt-budget=%d' % rng.randint(1, limit * 1024)]
+
+
+class DeoptAnalyzer(Analyzer):
+ MAX_DEOPT=1000000000
+
+ def __init__(self, min_interval):
+ super(DeoptAnalyzer, self).__init__()
+ self._min = min_interval
+
+ def get_analysis_flags(self):
+ return ['--deopt-every-n-times=%d' % self.MAX_DEOPT,
+ '--print-deopt-stress']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('=== Stress deopt counter: '):
+ counter = self.MAX_DEOPT - int(line.split(' ')[-1])
+ if counter < self._min:
+          # Skip this test since we won't generate any meaningful interval with
+          # the given minimum.
+ return None
+ return counter
+
+
+class DeoptFuzzer(Fuzzer):
+ def __init__(self, min_interval):
+ super(DeoptFuzzer, self).__init__()
+ self._min = min_interval
+
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ if analysis_value:
+ value = analysis_value / 2
+ else:
+ value = 10000
+ interval = rng.randint(self._min, max(value, self._min))
+ yield ['--deopt-every-n-times=%d' % interval]
+
+
+FUZZERS = {
+ 'compaction': (None, CompactionFuzzer),
+ 'deopt': (DeoptAnalyzer, DeoptFuzzer),
+ 'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer),
+ 'interrupt_budget': (None, InterruptBudgetFuzzer),
+ 'marking': (MarkingAnalyzer, MarkingFuzzer),
+ 'scavenge': (ScavengeAnalyzer, ScavengeFuzzer),
+ 'threads': (None, ThreadPoolSizeFuzzer),
+}
+
+
+def create_fuzzer_config(name, probability, *args, **kwargs):
+ analyzer_class, fuzzer_class = FUZZERS[name]
+ return FuzzerConfig(
+ probability,
+ analyzer_class(*args, **kwargs) if analyzer_class else None,
+ fuzzer_class(*args, **kwargs),
+ )
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 78514f7252..221c64bfdd 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -103,6 +103,15 @@ class SimpleProgressIndicator(ProgressIndicator):
class VerboseProgressIndicator(SimpleProgressIndicator):
+ def __init__(self):
+ super(VerboseProgressIndicator, self).__init__()
+ self._last_printed_time = time.time()
+
+ def _print(self, text):
+ print text
+ sys.stdout.flush()
+ self._last_printed_time = time.time()
+
def _on_result_for(self, test, result):
super(VerboseProgressIndicator, self)._on_result_for(test, result)
# TODO(majeski): Support for dummy/grouped results
@@ -113,12 +122,13 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
outcome = 'FAIL'
else:
outcome = 'pass'
- print 'Done running %s: %s' % (test, outcome)
- sys.stdout.flush()
+ self._print('Done running %s: %s' % (test, outcome))
def _on_heartbeat(self):
- print 'Still working...'
- sys.stdout.flush()
+ if time.time() - self._last_printed_time > 30:
+ # Print something every 30 seconds to not get killed by an output
+ # timeout.
+ self._print('Still working...')
class DotsProgressIndicator(SimpleProgressIndicator):
@@ -292,7 +302,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator):
- def __init__(self, json_test_results, arch, mode, random_seed):
+ def __init__(self, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
# We want to drop stdout/err for all passed tests on the first try, but we
# need to get outputs for all runs after the first one. To accommodate that,
@@ -303,7 +313,6 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
- self.random_seed = random_seed
self.results = []
self.tests = []
@@ -338,10 +347,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"result": test.output_proc.get_outcome(output),
"expected": test.expected_outcomes,
"duration": output.duration,
-
- # TODO(machenbach): This stores only the global random seed from the
- # context and not possible overrides when using random-seed stress.
- "random_seed": self.random_seed,
+ "random_seed": test.random_seed,
"target_name": test.get_shell(),
"variant": test.variant,
})
diff --git a/deps/v8/tools/testrunner/testproc/rerun.py b/deps/v8/tools/testrunner/testproc/rerun.py
index 7f96e0260c..a72bb3ebc6 100644
--- a/deps/v8/tools/testrunner/testproc/rerun.py
+++ b/deps/v8/tools/testrunner/testproc/rerun.py
@@ -34,7 +34,7 @@ class RerunProc(base.TestProcProducer):
results = self._results[test.procid]
results.append(result)
- if self._needs_rerun(test, result):
+ if not self.is_stopped and self._needs_rerun(test, result):
self._rerun[test.procid] += 1
if self._rerun_total_left is not None:
self._rerun_total_left -= 1
diff --git a/deps/v8/tools/testrunner/testproc/seed.py b/deps/v8/tools/testrunner/testproc/seed.py
new file mode 100644
index 0000000000..3f40e79b34
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/seed.py
@@ -0,0 +1,58 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+from collections import defaultdict
+
+from . import base
+from ..utils import random_utils
+
+
+class SeedProc(base.TestProcProducer):
+ def __init__(self, count, seed=None, parallel_subtests=1):
+ """
+ Args:
+ count: How many subtests with different seeds to create for each test.
+ 0 means infinite.
+ seed: seed to use. None means random seed for each subtest.
+      parallel_subtests: How many subtests of each test to run at the same time.
+ """
+ super(SeedProc, self).__init__('Seed')
+ self._count = count
+ self._seed = seed
+ self._last_idx = defaultdict(int)
+ self._todo = defaultdict(int)
+ self._parallel_subtests = parallel_subtests
+ if count:
+ self._parallel_subtests = min(self._parallel_subtests, count)
+
+ def setup(self, requirement=base.DROP_RESULT):
+ super(SeedProc, self).setup(requirement)
+
+ # SeedProc is optimized for dropping the result
+ assert requirement == base.DROP_RESULT
+
+ def _next_test(self, test):
+ for _ in xrange(0, self._parallel_subtests):
+ self._try_send_next_test(test)
+
+ def _result_for(self, test, subtest, result):
+ self._todo[test.procid] -= 1
+ self._try_send_next_test(test)
+
+ def _try_send_next_test(self, test):
+ def create_subtest(idx):
+ seed = self._seed or random_utils.random_seed()
+ return self._create_subtest(test, idx, random_seed=seed)
+
+ num = self._last_idx[test.procid]
+ if not self._count or num < self._count:
+ num += 1
+ self._send_test(create_subtest(num))
+ self._todo[test.procid] += 1
+ self._last_idx[test.procid] = num
+ elif not self._todo.get(test.procid):
+ del self._last_idx[test.procid]
+ del self._todo[test.procid]
+ self._send_result(test, None)
diff --git a/deps/v8/tools/testrunner/testproc/sigproc.py b/deps/v8/tools/testrunner/testproc/sigproc.py
new file mode 100644
index 0000000000..e97fe7ece3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/sigproc.py
@@ -0,0 +1,31 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import signal
+
+from . import base
+from testrunner.local import utils
+
+
+class SignalProc(base.TestProcObserver):
+ def __init__(self):
+ super(SignalProc, self).__init__()
+ self.exit_code = utils.EXIT_CODE_PASS
+
+ def setup(self, *args, **kwargs):
+ super(SignalProc, self).setup(*args, **kwargs)
+    # It should be called after processors are chained together to not lose
+    # a caught signal.
+ signal.signal(signal.SIGINT, self._on_ctrlc)
+ signal.signal(signal.SIGTERM, self._on_sigterm)
+
+ def _on_ctrlc(self, _signum, _stack_frame):
+ print '>>> Ctrl-C detected, early abort...'
+ self.exit_code = utils.EXIT_CODE_INTERRUPTED
+ self.stop()
+
+ def _on_sigterm(self, _signum, _stack_frame):
+ print '>>> SIGTERM received, early abort...'
+ self.exit_code = utils.EXIT_CODE_TERMINATED
+ self.stop()
diff --git a/deps/v8/tools/testrunner/testproc/timeout.py b/deps/v8/tools/testrunner/testproc/timeout.py
new file mode 100644
index 0000000000..84ddc656e2
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/timeout.py
@@ -0,0 +1,28 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+
+from . import base
+
+
+class TimeoutProc(base.TestProcObserver):
+ def __init__(self, duration_sec):
+ super(TimeoutProc, self).__init__()
+ self._duration_sec = duration_sec
+ self._start = time.time()
+
+ def _on_next_test(self, test):
+ self._on_event()
+
+ def _on_result_for(self, test, result):
+ self._on_event()
+
+ def _on_heartbeat(self):
+ self._on_event()
+
+ def _on_event(self):
+ if not self.is_stopped:
+ if time.time() - self._start > self._duration_sec:
+ self.stop()