author    Michaël Zasso <targos@protonmail.com>  2019-03-12 09:01:49 +0100
committer Michaël Zasso <targos@protonmail.com>  2019-03-14 18:49:21 +0100
commit    7b48713334469818661fe276cf571de9c7899f2d (patch)
tree      4dbda49ac88db76ce09dc330a0cb587e68e139ba /deps/v8/tools/unittests/run_tests_test.py
parent    8549ac09b256666cf5275224ec58fab9939ff32e (diff)
deps: update V8 to 7.3.492.25
PR-URL: https://github.com/nodejs/node/pull/25852
Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/tools/unittests/run_tests_test.py')
-rwxr-xr-x  deps/v8/tools/unittests/run_tests_test.py  175
1 file changed, 59 insertions(+), 116 deletions(-)
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 4eb9feeac6..e136db6b53 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -103,8 +103,7 @@ def run_tests(basedir, *args, **kwargs):
sys_args.append('--infra-staging')
else:
sys_args.append('--no-infra-staging')
- code = standard_runner.StandardTestRunner(
- basedir=basedir).execute(sys_args)
+ code = standard_runner.StandardTestRunner(basedir=basedir).execute(sys_args)
return Result(stdout.getvalue(), stderr.getvalue(), code)
@@ -174,7 +173,6 @@ class SystemTest(unittest.TestCase):
'sweet/bananas',
'sweet/raspberries',
)
- self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
# TODO(majeski): Implement for test processors
# self.assertIn('Total time:', result.stderr, result)
@@ -193,10 +191,9 @@ class SystemTest(unittest.TestCase):
'--shard-run=%d' % shard,
'sweet/bananas',
'sweet/raspberries',
- infra_staging=True,
+ infra_staging=False,
)
# One of the shards gets one variant of each test.
- self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
if shard == 1:
self.assertIn('Done running sweet/bananas', result.stdout, result)
@@ -225,10 +222,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
- def testFailProc(self):
- self.testFail(infra_staging=True)
-
- def testFail(self, infra_staging=True):
+ def testFail(self):
"""Test running only failing tests in two variants."""
with temp_base() as basedir:
result = run_tests(
@@ -237,17 +231,13 @@ class SystemTest(unittest.TestCase):
'--progress=verbose',
'--variants=default,stress',
'sweet/strawberries',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 2 tests', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
- def check_cleaned_json_output(self, expected_results_name, actual_json):
+ def check_cleaned_json_output(
+ self, expected_results_name, actual_json, basedir):
# Check relevant properties of the json output.
with open(actual_json) as f:
json_output = json.load(f)[0]
@@ -260,6 +250,7 @@ class SystemTest(unittest.TestCase):
data['duration'] = 1
data['command'] = ' '.join(
['/usr/bin/python'] + data['command'].split()[1:])
+ data['command'] = data['command'].replace(basedir + '/', '')
for data in json_output['slowest_tests']:
replace_variable_data(data)
for data in json_output['results']:
@@ -272,10 +263,7 @@ class SystemTest(unittest.TestCase):
msg = None # Set to pretty_json for bootstrapping.
self.assertDictEqual(json_output, expected_test_results, msg)
- def testFailWithRerunAndJSONProc(self):
- self.testFailWithRerunAndJSON(infra_staging=True)
-
- def testFailWithRerunAndJSON(self, infra_staging=True):
+ def testFailWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
with temp_base() as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -288,21 +276,12 @@ class SystemTest(unittest.TestCase):
'--random-seed=123',
'--json-test-results', json_path,
'sweet/strawberries',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 1 tests', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
- if not infra_staging:
- # We run one test, which fails and gets re-run twice.
- self.assertIn('3 tests failed', result.stdout, result)
- else:
- # With test processors we don't count reruns as separated failures.
- # TODO(majeski): fix it?
- self.assertIn('1 tests failed', result.stdout, result)
+ # With test processors we don't count reruns as separated failures.
+ # TODO(majeski): fix it?
+ self.assertIn('1 tests failed', result.stdout, result)
self.assertEqual(0, result.returncode, result)
# TODO(majeski): Previously we only reported the variant flags in the
@@ -310,12 +289,10 @@ class SystemTest(unittest.TestCase):
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
self.maxDiff = None
- self.check_cleaned_json_output('expected_test_results1.json', json_path)
+ self.check_cleaned_json_output(
+ 'expected_test_results1.json', json_path, basedir)
- def testFlakeWithRerunAndJSONProc(self):
- self.testFlakeWithRerunAndJSON(infra_staging=True)
-
- def testFlakeWithRerunAndJSON(self, infra_staging=True):
+ def testFlakeWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
with temp_base(baseroot='testroot2') as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -328,21 +305,15 @@ class SystemTest(unittest.TestCase):
'--random-seed=123',
'--json-test-results', json_path,
'sweet',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 1 tests', result.stdout, result)
- self.assertIn(
- 'Done running sweet/bananaflakes: FAIL', result.stdout, result)
- self.assertIn('1 tests failed', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn(
- 'Done running sweet/bananaflakes: pass', result.stdout, result)
- self.assertIn('All tests succeeded', result.stdout, result)
+ self.assertIn(
+ 'Done running sweet/bananaflakes: pass', result.stdout, result)
+ self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
self.maxDiff = None
- self.check_cleaned_json_output('expected_test_results2.json', json_path)
+ self.check_cleaned_json_output(
+ 'expected_test_results2.json', json_path, basedir)
def testAutoDetect(self):
"""Fake a build with several auto-detected options.
@@ -355,7 +326,9 @@ class SystemTest(unittest.TestCase):
basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
v8_enable_i18n_support=False, v8_target_cpu='x86',
- v8_use_snapshot=False)
+ v8_use_snapshot=False, v8_enable_embedded_builtins=False,
+ v8_enable_verify_csa=False, v8_enable_lite_mode=False,
+ v8_enable_pointer_compression=False)
result = run_tests(
basedir,
'--mode=Release',
@@ -379,10 +352,7 @@ class SystemTest(unittest.TestCase):
# TODO(machenbach): Test some more implications of the auto-detected
# options, e.g. that the right env variables are set.
- def testSkipsProc(self):
- self.testSkips(infra_staging=True)
-
- def testSkips(self, infra_staging=True):
+ def testSkips(self):
"""Test skipping tests in status file for a specific variant."""
with temp_base() as basedir:
result = run_tests(
@@ -391,19 +361,27 @@ class SystemTest(unittest.TestCase):
'--progress=verbose',
'--variants=nooptimization',
'sweet/strawberries',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 0 tests', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('0 tests ran', result.stdout, result)
+ self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
- def testDefaultProc(self):
- self.testDefault(infra_staging=True)
+ def testRunSkips(self):
+ """Inverse the above. Test parameter to keep running skipped tests."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=nooptimization',
+ '--run-skipped',
+ 'sweet/strawberries',
+ )
+ self.assertIn('1 tests failed', result.stdout, result)
+ self.assertIn('1 tests ran', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
- def testDefault(self, infra_staging=True):
+ def testDefault(self):
"""Test using default test suites, though no tests are run since they don't
exist in a test setting.
"""
@@ -411,13 +389,9 @@ class SystemTest(unittest.TestCase):
result = run_tests(
basedir,
'--mode=Release',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Warning: no tests were run!', result.stdout, result)
- else:
- self.assertIn('Running 0 base tests', result.stdout, result)
- self.assertIn('0 tests ran', result.stdout, result)
+ self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self):
@@ -514,10 +488,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('(no source available)', result.stdout, result)
self.assertEqual(0, result.returncode, result)
- def testPredictableProc(self):
- self.testPredictable(infra_staging=True)
-
- def testPredictable(self, infra_staging=True):
+ def testPredictable(self):
"""Test running a test in verify-predictable mode.
The test will fail because of missing allocation output. We verify that and
@@ -531,13 +502,9 @@ class SystemTest(unittest.TestCase):
'--progress=verbose',
'--variants=default',
'sweet/bananas',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 1 tests', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('1 tests ran', result.stdout, result)
+ self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result)
self.assertIn('--predictable --verify_predictable', result.stdout, result)
@@ -558,10 +525,7 @@ class SystemTest(unittest.TestCase):
# timeout was used.
self.assertEqual(0, result.returncode, result)
- def testRandomSeedStressWithDefaultProc(self):
- self.testRandomSeedStressWithDefault(infra_staging=True)
-
- def testRandomSeedStressWithDefault(self, infra_staging=True):
+ def testRandomSeedStressWithDefault(self):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
@@ -571,13 +535,9 @@ class SystemTest(unittest.TestCase):
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if infra_staging:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('2 tests ran', result.stdout, result)
- else:
- self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self):
@@ -592,7 +552,6 @@ class SystemTest(unittest.TestCase):
'--random-seed=123',
'sweet/strawberries',
)
- self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
# We use a failing test so that the command is printed and we can verify
# that the right random seed was passed.
@@ -618,7 +577,6 @@ class SystemTest(unittest.TestCase):
)
# Both tests are either marked as running in only default or only
# slow variant.
- self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
@@ -629,10 +587,7 @@ class SystemTest(unittest.TestCase):
self.assertTrue(statusfile.PresubmitCheck(
os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
- def testDotsProgressProc(self):
- self.testDotsProgress(infra_staging=True)
-
- def testDotsProgress(self, infra_staging=True):
+ def testDotsProgress(self):
with temp_base() as basedir:
result = run_tests(
basedir,
@@ -641,29 +596,19 @@ class SystemTest(unittest.TestCase):
'sweet/cherries',
'sweet/bananas',
'--no-sorting', '-j1', # make results order deterministic
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 2 tests', result.stdout, result)
- else:
- self.assertIn('Running 2 base tests', result.stdout, result)
- self.assertIn('2 tests ran', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('F.', result.stdout, result)
self.assertEqual(1, result.returncode, result)
- def testMonoProgressProc(self):
- self._testCompactProgress('mono', True)
-
def testMonoProgress(self):
- self._testCompactProgress('mono', False)
-
- def testColorProgressProc(self):
- self._testCompactProgress('color', True)
+ self._testCompactProgress('mono')
def testColorProgress(self):
- self._testCompactProgress('color', False)
+ self._testCompactProgress('color')
- def _testCompactProgress(self, name, infra_staging):
+ def _testCompactProgress(self, name):
with temp_base() as basedir:
result = run_tests(
basedir,
@@ -671,14 +616,13 @@ class SystemTest(unittest.TestCase):
'--progress=%s' % name,
'sweet/cherries',
'sweet/bananas',
- infra_staging=infra_staging,
+ infra_staging=False,
)
if name == 'color':
- expected = ('\033[34m% 100\033[0m|'
- '\033[32m+ 1\033[0m|'
+ expected = ('\033[32m+ 1\033[0m|'
'\033[31m- 1\033[0m]: Done')
else:
- expected = '% 100|+ 1|- 1]: Done'
+ expected = '+ 1|- 1]: Done'
self.assertIn(expected, result.stdout)
self.assertIn('sweet/cherries', result.stdout)
self.assertIn('sweet/bananas', result.stdout)
@@ -697,7 +641,6 @@ class SystemTest(unittest.TestCase):
'sweet/blackberries', # FAIL
'sweet/raspberries', # should not run
)
- self.assertIn('Running 4 base tests', result.stdout, result)
self.assertIn('sweet/mangoes: pass', result.stdout, result)
self.assertIn('sweet/strawberries: FAIL', result.stdout, result)
self.assertIn('Too many failures, exiting...', result.stdout, result)