author    Myles Borins <mylesborins@google.com>  2018-04-10 21:39:51 -0400
committer Myles Borins <mylesborins@google.com>  2018-04-11 13:22:42 -0400
commit    12a1b9b8049462e47181a298120243dc83e81c55
tree      8605276308c8b4e3597516961266bae1af57557a /deps/v8/tools
parent    78cd8263354705b767ef8c6a651740efe4931ba0
deps: update V8 to 6.6.346.23
PR-URL: https://github.com/nodejs/node/pull/19201
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/tools')
-rw-r--r--  deps/v8/tools/BUILD.gn | 9
-rw-r--r--  deps/v8/tools/Makefile.tags | 30
-rwxr-xr-x  deps/v8/tools/bigint-tester.py | 54
-rw-r--r--  deps/v8/tools/check-static-initializers.gyp | 26
-rw-r--r--  deps/v8/tools/clusterfuzz/BUILD.gn (renamed from deps/v8/tools/foozzie/BUILD.gn) | 2
-rw-r--r--  deps/v8/tools/clusterfuzz/PRESUBMIT.py | 8
-rw-r--r--  deps/v8/tools/clusterfuzz/testdata/failure_output.txt (renamed from deps/v8/tools/foozzie/testdata/failure_output.txt) | 0
-rw-r--r--  deps/v8/tools/clusterfuzz/testdata/fuzz-123.js (renamed from deps/v8/tools/foozzie/testdata/fuzz-123.js) | 0
-rw-r--r--  deps/v8/tools/clusterfuzz/testdata/test_d8_1.py (renamed from deps/v8/tools/foozzie/testdata/test_d8_1.py) | 0
-rw-r--r--  deps/v8/tools/clusterfuzz/testdata/test_d8_2.py (renamed from deps/v8/tools/foozzie/testdata/test_d8_2.py) | 0
-rw-r--r--  deps/v8/tools/clusterfuzz/testdata/test_d8_3.py (renamed from deps/v8/tools/foozzie/testdata/test_d8_3.py) | 0
-rw-r--r--  deps/v8/tools/clusterfuzz/testdata/v8_build_config.json (renamed from deps/v8/tools/foozzie/testdata/v8_build_config.json) | 0
-rw-r--r--  deps/v8/tools/clusterfuzz/v8_commands.py (renamed from deps/v8/tools/foozzie/v8_commands.py) | 0
-rwxr-xr-x  deps/v8/tools/clusterfuzz/v8_foozzie.py (renamed from deps/v8/tools/foozzie/v8_foozzie.py) | 0
-rw-r--r--  deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js | 100
-rwxr-xr-x[-rw-r--r--]  deps/v8/tools/clusterfuzz/v8_foozzie_test.py (renamed from deps/v8/tools/foozzie/v8_foozzie_test.py) | 36
-rw-r--r--  deps/v8/tools/clusterfuzz/v8_fuzz_config.py | 45
-rw-r--r--  deps/v8/tools/clusterfuzz/v8_mock.js (renamed from deps/v8/tools/foozzie/v8_mock.js) | 11
-rw-r--r--  deps/v8/tools/clusterfuzz/v8_mock_archs.js (renamed from deps/v8/tools/foozzie/v8_mock_archs.js) | 0
-rw-r--r--  deps/v8/tools/clusterfuzz/v8_suppressions.js (renamed from deps/v8/tools/foozzie/v8_suppressions.js) | 0
-rw-r--r--  deps/v8/tools/clusterfuzz/v8_suppressions.py (renamed from deps/v8/tools/foozzie/v8_suppressions.py) | 8
-rwxr-xr-x  deps/v8/tools/dev/gm.py | 15
-rwxr-xr-x  deps/v8/tools/gcmole/download_gcmole_tools.py | 20
-rw-r--r--  deps/v8/tools/gcmole/gcmole.lua | 54
-rw-r--r--  deps/v8/tools/gcmole/run_gcmole.gyp | 23
-rwxr-xr-x  deps/v8/tools/gcov.sh | 67
-rw-r--r--  deps/v8/tools/gen-postmortem-metadata.py | 5
-rwxr-xr-x  deps/v8/tools/get_landmines.py | 50
-rwxr-xr-x  deps/v8/tools/grokdump.py | 80
-rwxr-xr-x  deps/v8/tools/gyp_flag_compare.py | 280
-rw-r--r--  deps/v8/tools/heap-stats/README.md | 5
-rw-r--r--  deps/v8/tools/heap-stats/categories.js | 98
-rw-r--r--  deps/v8/tools/heap-stats/details-selection.html | 100
-rw-r--r--  deps/v8/tools/heap-stats/details-selection.js | 157
-rw-r--r--  deps/v8/tools/heap-stats/global-timeline.js | 1
-rw-r--r--  deps/v8/tools/heap-stats/histogram-viewer.html | 19
-rw-r--r--  deps/v8/tools/heap-stats/histogram-viewer.js | 152
-rw-r--r--  deps/v8/tools/heap-stats/index.html | 37
-rw-r--r--  deps/v8/tools/heap-stats/model.js | 77
-rw-r--r--  deps/v8/tools/heap-stats/trace-file-reader.html | 63
-rw-r--r--  deps/v8/tools/heap-stats/trace-file-reader.js | 231
-rw-r--r--  deps/v8/tools/isolate_driver.py | 2
-rw-r--r--  deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py | 20
-rw-r--r--  deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp | 26
-rwxr-xr-x  deps/v8/tools/mb/mb.py | 520
-rwxr-xr-x  deps/v8/tools/mb/mb_unittest.py | 281
-rwxr-xr-x  deps/v8/tools/mingw-generate-makefiles.sh | 97
-rw-r--r--  deps/v8/tools/mips_toolchain.tar.gz.sha1 | 1
-rwxr-xr-x  deps/v8/tools/node/build_gn.py | 94
-rwxr-xr-x  deps/v8/tools/node/fetch_deps.py | 6
-rwxr-xr-x  deps/v8/tools/node/update_node.py | 35
-rw-r--r--  deps/v8/tools/parser-shell.cc | 63
-rw-r--r--  deps/v8/tools/parser-shell.gyp | 60
-rwxr-xr-x  deps/v8/tools/presubmit.py | 6
-rw-r--r--  deps/v8/tools/release/common_includes.py | 1
-rwxr-xr-x  deps/v8/tools/release/merge_to_branch.py | 9
-rwxr-xr-x  deps/v8/tools/release/roll_merge.py | 10
-rwxr-xr-x  deps/v8/tools/release/update_node.py | 176
-rw-r--r--  deps/v8/tools/run-deopt-fuzzer.gyp | 26
-rw-r--r--  deps/v8/tools/run-deopt-fuzzer.isolate | 19
-rwxr-xr-x  deps/v8/tools/run-deopt-fuzzer.py | 14
-rw-r--r--  deps/v8/tools/run-num-fuzzer.gyp | 26
-rw-r--r--  deps/v8/tools/run-num-fuzzer.isolate | 5
-rwxr-xr-x  deps/v8/tools/run-num-fuzzer.py (renamed from deps/v8/tools/run-gc-fuzzer.py) | 6
-rwxr-xr-x  deps/v8/tools/run_perf.py | 3
-rw-r--r--  deps/v8/tools/testrunner/base_runner.py | 245
-rwxr-xr-x  deps/v8/tools/testrunner/deopt_fuzzer.py | 336
-rwxr-xr-x  deps/v8/tools/testrunner/gc_fuzzer.py | 280
-rw-r--r--  deps/v8/tools/testrunner/local/command.py | 31
-rw-r--r--  deps/v8/tools/testrunner/local/execution.py | 293
-rw-r--r--  deps/v8/tools/testrunner/local/perfdata.py | 141
-rw-r--r--  deps/v8/tools/testrunner/local/pool.py | 201
-rwxr-xr-x[-rw-r--r--]  deps/v8/tools/testrunner/local/pool_unittest.py | 22
-rw-r--r--  deps/v8/tools/testrunner/local/progress.py | 452
-rw-r--r--  deps/v8/tools/testrunner/local/statusfile.py | 4
-rw-r--r--  deps/v8/tools/testrunner/local/testsuite.py | 105
-rw-r--r--  deps/v8/tools/testrunner/local/utils.py | 15
-rw-r--r--  deps/v8/tools/testrunner/local/variants.py | 27
-rwxr-xr-x  deps/v8/tools/testrunner/num_fuzzer.py | 225
-rw-r--r--  deps/v8/tools/testrunner/objects/context.py | 51
-rw-r--r--  deps/v8/tools/testrunner/objects/output.py | 9
-rw-r--r--  deps/v8/tools/testrunner/objects/predictable.py | 20
-rw-r--r--  deps/v8/tools/testrunner/objects/testcase.py | 117
-rw-r--r--  deps/v8/tools/testrunner/outproc/base.py | 27
-rwxr-xr-x  deps/v8/tools/testrunner/standard_runner.py | 505
-rw-r--r--  deps/v8/tools/testrunner/test_config.py | 32
-rw-r--r--  deps/v8/tools/testrunner/testproc/base.py | 51
-rw-r--r--  deps/v8/tools/testrunner/testproc/combiner.py | 124
-rw-r--r--  deps/v8/tools/testrunner/testproc/execution.py | 65
-rw-r--r--  deps/v8/tools/testrunner/testproc/expectation.py | 27
-rw-r--r--  deps/v8/tools/testrunner/testproc/fuzzer.py | 287
-rw-r--r--  deps/v8/tools/testrunner/testproc/progress.py | 26
-rw-r--r--  deps/v8/tools/testrunner/testproc/rerun.py | 2
-rw-r--r--  deps/v8/tools/testrunner/testproc/seed.py | 58
-rw-r--r--  deps/v8/tools/testrunner/testproc/sigproc.py | 31
-rw-r--r--  deps/v8/tools/testrunner/testproc/timeout.py | 28
-rw-r--r--  deps/v8/tools/testrunner/trycatch_loader.js | 42
-rw-r--r--  deps/v8/tools/testrunner/utils/__init__.py | 3
-rw-r--r--  deps/v8/tools/testrunner/utils/random_utils.py | 13
-rw-r--r--  deps/v8/tools/toolchain/BUILD.gn | 23
-rwxr-xr-x  deps/v8/tools/try_perf.py | 32
-rw-r--r--  deps/v8/tools/turbolizer/index.html | 6
-rw-r--r--  deps/v8/tools/turbolizer/monkey.js | 19
-rw-r--r--  deps/v8/tools/turbolizer/turbo-visualizer.css | 11
-rw-r--r--  deps/v8/tools/turbolizer/turbo-visualizer.js | 3
-rwxr-xr-x  deps/v8/tools/unittests/run_tests_test.py | 61
-rw-r--r--  deps/v8/tools/unittests/testdata/expected_test_results1.json | 30
-rw-r--r--  deps/v8/tools/unittests/testdata/expected_test_results2.json | 20
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py | 8
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py | 8
-rw-r--r--  deps/v8/tools/v8heapconst.py | 383
-rwxr-xr-x  deps/v8/tools/verify_source_deps.py | 187
-rwxr-xr-x  deps/v8/tools/wasm/update-wasm-spec-tests.sh | 10
-rw-r--r--  deps/v8/tools/whitespace.txt | 4
114 files changed, 3469 insertions, 4600 deletions
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 1c0864d0d8..d8fdc49505 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -11,7 +11,6 @@ group("gn_all") {
deps = [
":check-static-initializers_run",
":jsfunfuzz_run",
- ":run-deopt-fuzzer_run",
":run-gcmole_run",
":run-num-fuzzer_run",
]
@@ -34,14 +33,6 @@ v8_isolate_run("jsfunfuzz") {
isolate = "jsfunfuzz/jsfunfuzz.isolate"
}
-v8_isolate_run("run-deopt-fuzzer") {
- deps = [
- "..:d8_run",
- ]
-
- isolate = "run-deopt-fuzzer.isolate"
-}
-
v8_isolate_run("run-gcmole") {
deps = [
"..:d8_run",
diff --git a/deps/v8/tools/Makefile.tags b/deps/v8/tools/Makefile.tags
new file mode 100644
index 0000000000..372824dad7
--- /dev/null
+++ b/deps/v8/tools/Makefile.tags
@@ -0,0 +1,30 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+# Variable default definitions. Override them by exporting them in your shell.
+V8_DIR ?= $(realpath $(dir $(lastword $(MAKEFILE_LIST)))/..)
+
+# Support for the GNU GLOBAL Source Code Tag System.
+$(V8_DIR)/gtags.files: $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+ @(cd $(V8_DIR) && find include src test -name '*.h' -o -name '*.cc' -o -name '*.c') > $@
+
+# We need to manually set the stack limit here, to work around bugs in
+# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems.
+# Using $(wildcard ...) gracefully ignores non-existing files, so that stale
+# gtags.files after switching branches don't cause recipe failures.
+$(V8_DIR)/GPATH $(V8_DIR)/GRTAGS $(V8_DIR)/GSYMS $(V8_DIR)/GTAGS: $(V8_DIR)/gtags.files $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+ @cd $(V8_DIR) && bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<'
+
+$(V8_DIR)/tags: $(V8_DIR)/gtags.files $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+ @(ctags --version | grep 'Exuberant Ctags' >/dev/null) || \
+ (echo "Please install Exuberant Ctags (check 'ctags --version')" >&2; false)
+ @cd $(V8_DIR) && ctags --fields=+l -L gtags.files
+
+tags: $(V8_DIR)/tags
+
+tags.clean:
+ @rm -f $(addprefix $(V8_DIR), gtags.files GPATH GRTAGS GSYMS GTAGS tags)
+
+clean: tags.clean
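Since V8_DIR defaults to the checkout containing the makefile itself, the targets can be invoked from any directory, e.g. `make -f tools/Makefile.tags tags` to build the Exuberant Ctags index (assuming ctags and, for the GTAGS database, GNU GLOBAL are installed), and `make -f tools/Makefile.tags clean` to remove all generated index files.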
diff --git a/deps/v8/tools/bigint-tester.py b/deps/v8/tools/bigint-tester.py
index 0452a0d1db..d48d2546f9 100755
--- a/deps/v8/tools/bigint-tester.py
+++ b/deps/v8/tools/bigint-tester.py
@@ -13,9 +13,9 @@ import sys
import tempfile
# Configuration.
-kChars = "0123456789abcdefghijklmnopqrstuvwxyz"
+kChars = "0123456789abcdef"
kBase = 16
-kLineLength = 71 # A bit less than 80.
+kLineLength = 70 # A bit less than 80.
kNumInputsGenerate = 20
kNumInputsStress = 1000
@@ -46,29 +46,36 @@ if (error_count !== 0) {
}"""
def GenRandom(length, negative=kRandom):
- if length == 0: return "0"
+ if length == 0: return "0n"
s = []
if negative == kYes or (negative == kRandom and (random.randint(0, 1) == 0)):
s.append("-") # 50% chance of negative.
+ s.append("0x")
s.append(kChars[random.randint(1, kBase - 1)]) # No leading zero.
for i in range(1, length):
s.append(kChars[random.randint(0, kBase - 1)])
+ s.append("n")
return "".join(s)
-def Format(x, base):
+def Parse(x):
+ assert x[-1] == 'n', x
+ return int(x[:-1], kBase)
+
+def Format(x):
original = x
negative = False
- if x == 0: return "0"
+ if x == 0: return "0n"
if x < 0:
negative = True
x = -x
s = ""
while x > 0:
- s = kChars[x % base] + s
- x = x / base
+ s = kChars[x % kBase] + s
+ x = x / kBase
+ s = "0x" + s + "n"
if negative:
s = "-" + s
- assert int(s, base) == original
+ assert Parse(s) == original
return s
class TestGenerator(object):
@@ -120,17 +127,16 @@ class UnaryOp(TestGenerator):
# Subclasses should not override anything below.
def EmitOne(self):
x_str = self.GenerateInput()
- x_num = int(x_str, kBase)
+ x_num = Parse(x_str)
result_num = self.GenerateResult(x_num)
- result_str = Format(result_num, kBase)
- return "{\n a: \"%s\",\n r: \"%s\"\n}" % (x_str, result_str)
+ result_str = Format(result_num)
+ return "{\n a: %s,\n r: %s\n}" % (x_str, result_str)
def EmitTestCore(self):
return """\
- var a = BigInt.parseInt(d.a, %(base)d);
- var r = %(op)sa;
- if (d.r !== r.toString(%(base)d)) {
- print("Input: " + a.toString(%(base)d));
+ var r = %(op)sd.a;
+ if (d.r !== r) {
+ print("Input: " + d.a.toString(%(base)d));
print("Result: " + r.toString(%(base)d));
print("Expected: " + d.r);
error_count++;
@@ -152,21 +158,19 @@ class BinaryOp(TestGenerator):
# Subclasses should not override anything below.
def EmitOne(self):
left_str, right_str = self.GenerateInputs()
- left_num = int(left_str, kBase)
- right_num = int(right_str, kBase)
+ left_num = Parse(left_str)
+ right_num = Parse(right_str)
result_num = self.GenerateResult(left_num, right_num)
- result_str = Format(result_num, kBase)
- return ("{\n a: \"%s\",\n b: \"%s\",\n r: \"%s\"\n}" %
+ result_str = Format(result_num)
+ return ("{\n a: %s,\n b: %s,\n r: %s\n}" %
(left_str, right_str, result_str))
def EmitTestCore(self):
return """\
- var a = BigInt.parseInt(d.a, %(base)d);
- var b = BigInt.parseInt(d.b, %(base)d);
- var r = a %(op)s b;
- if (d.r !== r.toString(%(base)d)) {
- print("Input A: " + a.toString(%(base)d));
- print("Input B: " + b.toString(%(base)d));
+ var r = d.a %(op)s d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(%(base)d));
+ print("Input B: " + d.b.toString(%(base)d));
print("Result: " + r.toString(%(base)d));
print("Expected: " + d.r);
print("Op: %(op)s");
diff --git a/deps/v8/tools/check-static-initializers.gyp b/deps/v8/tools/check-static-initializers.gyp
deleted file mode 100644
index cfeacfc89f..0000000000
--- a/deps/v8/tools/check-static-initializers.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'check_static_initializers_run',
- 'type': 'none',
- 'dependencies': [
- '../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'check-static-initializers.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/foozzie/BUILD.gn b/deps/v8/tools/clusterfuzz/BUILD.gn
index 532c1faca4..88219600a2 100644
--- a/deps/v8/tools/foozzie/BUILD.gn
+++ b/deps/v8/tools/clusterfuzz/BUILD.gn
@@ -9,6 +9,8 @@ if (v8_correctness_fuzzer) {
sources = [
"v8_commands.py",
"v8_foozzie.py",
+ "v8_foozzie_harness_adjust.js",
+ "v8_fuzz_config.py",
"v8_mock.js",
"v8_mock_archs.js",
"v8_suppressions.js",
diff --git a/deps/v8/tools/clusterfuzz/PRESUBMIT.py b/deps/v8/tools/clusterfuzz/PRESUBMIT.py
new file mode 100644
index 0000000000..0faeb0603c
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/PRESUBMIT.py
@@ -0,0 +1,8 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+def CheckChangeOnCommit(input_api, output_api):
+ tests = input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api, output_api, '.', whitelist=['v8_foozzie_test.py$'])
+ return input_api.RunTests(tests)
diff --git a/deps/v8/tools/foozzie/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
index 85b1d7ab77..85b1d7ab77 100644
--- a/deps/v8/tools/foozzie/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
diff --git a/deps/v8/tools/foozzie/testdata/fuzz-123.js b/deps/v8/tools/clusterfuzz/testdata/fuzz-123.js
index fbde5736d4..fbde5736d4 100644
--- a/deps/v8/tools/foozzie/testdata/fuzz-123.js
+++ b/deps/v8/tools/clusterfuzz/testdata/fuzz-123.js
diff --git a/deps/v8/tools/foozzie/testdata/test_d8_1.py b/deps/v8/tools/clusterfuzz/testdata/test_d8_1.py
index 15a93fa535..15a93fa535 100644
--- a/deps/v8/tools/foozzie/testdata/test_d8_1.py
+++ b/deps/v8/tools/clusterfuzz/testdata/test_d8_1.py
diff --git a/deps/v8/tools/foozzie/testdata/test_d8_2.py b/deps/v8/tools/clusterfuzz/testdata/test_d8_2.py
index f2bdacfaa1..f2bdacfaa1 100644
--- a/deps/v8/tools/foozzie/testdata/test_d8_2.py
+++ b/deps/v8/tools/clusterfuzz/testdata/test_d8_2.py
diff --git a/deps/v8/tools/foozzie/testdata/test_d8_3.py b/deps/v8/tools/clusterfuzz/testdata/test_d8_3.py
index a6c8682b2f..a6c8682b2f 100644
--- a/deps/v8/tools/foozzie/testdata/test_d8_3.py
+++ b/deps/v8/tools/clusterfuzz/testdata/test_d8_3.py
diff --git a/deps/v8/tools/foozzie/testdata/v8_build_config.json b/deps/v8/tools/clusterfuzz/testdata/v8_build_config.json
index ea27b1ccd7..ea27b1ccd7 100644
--- a/deps/v8/tools/foozzie/testdata/v8_build_config.json
+++ b/deps/v8/tools/clusterfuzz/testdata/v8_build_config.json
diff --git a/deps/v8/tools/foozzie/v8_commands.py b/deps/v8/tools/clusterfuzz/v8_commands.py
index 0b3cae722b..0b3cae722b 100644
--- a/deps/v8/tools/foozzie/v8_commands.py
+++ b/deps/v8/tools/clusterfuzz/v8_commands.py
diff --git a/deps/v8/tools/foozzie/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 9bb3512bcf..9bb3512bcf 100755
--- a/deps/v8/tools/foozzie/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js b/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
new file mode 100644
index 0000000000..9509437827
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
@@ -0,0 +1,100 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extensions to mjsunit and other test harnesses added between harness and
+// fuzzing code.
+
+try {
+ // Scope for utility functions.
+ (function() {
+ // Same as in mjsunit.js.
+ function classOf(object) {
+ // Argument must not be null or undefined.
+ var string = Object.prototype.toString.call(object);
+ // String has format [object <ClassName>].
+ return string.substring(8, string.length - 1);
+ }
+
+ // Override prettyPrinted with a version that also recursively prints object
+ // properties (with a depth of 3).
+ let origPrettyPrinted = this.prettyPrinted;
+ this.prettyPrinted = function prettyPrinted(value, depth=3) {
+ if (depth == 0) {
+ return "...";
+ }
+ switch (typeof value) {
+ case "object":
+ if (value === null) return "null";
+ var objectClass = classOf(value);
+ switch (objectClass) {
+ case "Object":
+ var name = value.constructor.name;
+ if (!name)
+ name = "Object";
+ return name + "{" + Object.keys(value).map(function(key, index) {
+ return (
+ prettyPrinted(key, depth - 1) +
+ ": " +
+ prettyPrinted(value[key], depth - 1)
+ );
+ }).join(",") + "}";
+ }
+ }
+ // Fall through to original version for all other types.
+ return origPrettyPrinted(value);
+ }
+
+ // We're not interested in stack traces.
+ this.MjsUnitAssertionError = function MjsUnitAssertionError(message) {}
+ MjsUnitAssertionError.prototype.toString = function () { return ""; };
+
+ // Do more printing in assertions for more correctness coverage.
+ this.failWithMessage = function failWithMessage(message) {
+ print(prettyPrinted(message))
+ }
+
+ this.fail = function fail(expectedText, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertSame = function assertSame(expected, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertNotSame = function assertNotSame(expected, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertEquals = function assertEquals(expected, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertNotEquals = function assertNotEquals(expected, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertNull = function assertNull(value, name_opt) {
+ print(prettyPrinted(value));
+ }
+
+ this.assertNotNull = function assertNotNull(value, name_opt) {
+ print(prettyPrinted(value));
+ }
+
+ // Suppress optimization status as it leads to false positives.
+ this.assertUnoptimized = function assertUnoptimized() {}
+
+ this.assertOptimized = function assertOptimized() {}
+
+ this.isNeverOptimize = function isNeverOptimize() {}
+
+ this.isAlwaysOptimize = function isAlwaysOptimize() {}
+
+ this.isInterpreted = function isInterpreted() {}
+
+ this.isOptimized = function isOptimized() {}
+
+ this.isTurboFanned = function isTurboFanned() {}
+ })();
+} catch(e) { }
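The depth argument is what keeps the override from recursing without bound on deep or cyclic object graphs: once the budget is spent, the value is elided as "...". An illustrative Python rendering of the same idea (not part of the harness):

    def pretty_printed(value, depth=3):
        # Elide the whole value once the depth budget runs out.
        if depth == 0:
            return "..."
        if isinstance(value, dict):
            items = ", ".join(
                "%s: %s" % (pretty_printed(k, depth - 1),
                            pretty_printed(v, depth - 1))
                for k, v in value.items())
            return "{" + items + "}"
        return repr(value)

    # pretty_printed({'a': {'b': {'c': 1}}}) -> "{'a': {'b': {...: ...}}}"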
diff --git a/deps/v8/tools/foozzie/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
index ffe18a88d5..3b95111271 100644..100755
--- a/deps/v8/tools/foozzie/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -8,12 +9,43 @@ import sys
import unittest
import v8_foozzie
+import v8_fuzz_config
import v8_suppressions
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FOOZZIE = os.path.join(BASE_DIR, 'v8_foozzie.py')
TEST_DATA = os.path.join(BASE_DIR, 'testdata')
+
+class ConfigTest(unittest.TestCase):
+ def testExperiments(self):
+ """Test that probabilities add up to 100 and that all config names exist.
+ """
+ EXPERIMENTS = v8_fuzz_config.FOOZZIE_EXPERIMENTS
+ CONFIGS = v8_foozzie.CONFIGS
+ assert sum(x[0] for x in EXPERIMENTS) == 100
+ assert all(map(lambda x: x[1] in CONFIGS, EXPERIMENTS))
+ assert all(map(lambda x: x[2] in CONFIGS, EXPERIMENTS))
+ assert all(map(lambda x: x[3].endswith('d8'), EXPERIMENTS))
+
+ def testConfig(self):
+ """Smoke test how to choose experiments.
+
+ When experiment distribution changes this test might change, too.
+ """
+ class Rng(object):
+ def random(self):
+ return 0.5
+ self.assertEqual(
+ [
+ '--first-config=ignition',
+ '--second-config=ignition_turbo',
+ '--second-d8=d8',
+ ],
+ v8_fuzz_config.Config('foo', Rng()).choose_foozzie_flags(),
+ )
+
+
class UnitTest(unittest.TestCase):
def testDiff(self):
# TODO(machenbach): Mock out suppression configuration.
@@ -109,3 +141,7 @@ class SystemTest(unittest.TestCase):
e = ctx.exception
self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
self.assertEquals(expected_output, cut_verbose_output(e.output))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
new file mode 100644
index 0000000000..8cc1939e38
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -0,0 +1,45 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+# List of configuration experiments for correctness fuzzing.
+# List of <probability>, <1st config name>, <2nd config name>, <2nd d8>.
+# Probabilities must add up to 100.
+FOOZZIE_EXPERIMENTS = [
+ [5, 'ignition', 'ignition_asm', 'd8'],
+ [5, 'ignition', 'trusted', 'd8'],
+ [5, 'ignition', 'trusted_opt', 'd8'],
+ [10, 'ignition', 'slow_path', 'd8'],
+ [5, 'ignition', 'slow_path_opt', 'd8'],
+ [25, 'ignition', 'ignition_turbo', 'd8'],
+ [20, 'ignition', 'ignition_turbo_opt', 'd8'],
+ [5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
+ [5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
+ [5, 'ignition', 'ignition', 'clang_x86/d8'],
+ [5, 'ignition', 'ignition', 'clang_x64_v8_arm64/d8'],
+ [5, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
+]
+
+class Config(object):
+ def __init__(self, name, rng=None):
+ self.name = name
+ self.rng = rng or random.Random()
+
+ def choose_foozzie_flags(self):
+ """Randomly chooses a configuration from FOOZZIE_EXPERIMENTS.
+
+ Returns: List of flags to pass to v8_foozzie.py fuzz harness.
+ """
+ acc = 0
+ threshold = self.rng.random() * 100
+ for prob, first_config, second_config, second_d8 in FOOZZIE_EXPERIMENTS:
+ acc += prob
+ if acc > threshold:
+ return [
+ '--first-config=' + first_config,
+ '--second-config=' + second_config,
+ '--second-d8=' + second_d8,
+ ]
+ assert False
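choose_foozzie_flags() walks the cumulative distribution until the running total exceeds a random threshold in [0, 100). With the table above, a threshold of 0.5 * 100 = 50 is first exceeded at the cumulative sum 55 (5+5+5+10+5+25), which is why testConfig() in v8_foozzie_test.py expects the ignition vs. ignition_turbo pair. A standalone sketch of the same scheme (weighted_choice and FixedRng are illustrative names, not part of the tool):

    import random

    def weighted_choice(experiments, rng=None):
        # experiments: rows of [probability, payload...]; probabilities are
        # percentages and must sum to 100.
        rng = rng or random.Random()
        threshold = rng.random() * 100
        acc = 0
        for row in experiments:
            acc += row[0]
            if acc > threshold:
                return row[1:]
        raise AssertionError('probabilities do not sum to 100')

    class FixedRng(object):
        def random(self):
            return 0.5

    # threshold = 50; cumulative sums are 30, then 55 -> the 25%-row wins.
    assert weighted_choice([[30, 'a'], [25, 'b'], [45, 'c']], FixedRng()) == ['b']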
diff --git a/deps/v8/tools/foozzie/v8_mock.js b/deps/v8/tools/clusterfuzz/v8_mock.js
index 5d15304cd7..2f797dd89f 100644
--- a/deps/v8/tools/foozzie/v8_mock.js
+++ b/deps/v8/tools/clusterfuzz/v8_mock.js
@@ -10,7 +10,7 @@
// This will be overridden in the test cases. The override can be minimized.
-var __PrettyPrint = function __PrettyPrint(msg) { print(msg); };
+var prettyPrinted = function prettyPrinted(msg) { return msg; };
// Mock Math.random.
(function () {
@@ -69,6 +69,9 @@ var __PrettyPrint = function __PrettyPrint(msg) { print(msg); };
if (property == "now") {
return mockDateNow;
}
+ if (property == "prototype") {
+ return origDate.prototype
+ }
},
}
@@ -121,16 +124,16 @@ Object.defineProperty(
];
Worker = function(code){
try {
- __PrettyPrint(eval(code));
+ print(prettyPrinted(eval(code)));
} catch(e) {
- __PrettyPrint(e);
+ print(prettyPrinted(e));
}
this.getMessage = function(){
index = (index + 1) % 10;
return workerMessages[index];
}
this.postMessage = function(msg){
- __PrettyPrint(msg);
+ print(prettyPrinted(msg));
}
};
})();
diff --git a/deps/v8/tools/foozzie/v8_mock_archs.js b/deps/v8/tools/clusterfuzz/v8_mock_archs.js
index 507f31a3a2..507f31a3a2 100644
--- a/deps/v8/tools/foozzie/v8_mock_archs.js
+++ b/deps/v8/tools/clusterfuzz/v8_mock_archs.js
diff --git a/deps/v8/tools/foozzie/v8_suppressions.js b/deps/v8/tools/clusterfuzz/v8_suppressions.js
index 011e7272ba..011e7272ba 100644
--- a/deps/v8/tools/foozzie/v8_suppressions.js
+++ b/deps/v8/tools/clusterfuzz/v8_suppressions.js
diff --git a/deps/v8/tools/foozzie/v8_suppressions.py b/deps/v8/tools/clusterfuzz/v8_suppressions.py
index 87b1972e94..04f67b2cf9 100644
--- a/deps/v8/tools/foozzie/v8_suppressions.py
+++ b/deps/v8/tools/clusterfuzz/v8_suppressions.py
@@ -72,14 +72,6 @@ IGNORE_SOURCES = {
# label.
# Regular expressions are assumed to be compiled. We use regexp.search.
IGNORE_TEST_CASES = {
- 'slow_path': {
- 'crbug.com/800651':
- re.compile(r'async', re.S),
- },
- 'slow_path_opt': {
- 'crbug.com/800651':
- re.compile(r'async', re.S),
- },
}
# Ignore by output pattern. Map from config->bug->regexp. See IGNORE_TEST_CASES
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index c3dab0a870..6dfd46bf7b 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -20,15 +20,12 @@ All arguments are optional. Most combinations should work, e.g.:
from __future__ import print_function
import errno
-import multiprocessing
import os
import pty
import re
import subprocess
import sys
-BUILD_OPTS_DEFAULT = ""
-BUILD_OPTS_GOMA = "-j1000 -l%d" % (multiprocessing.cpu_count() + 2)
BUILD_TARGETS_TEST = ["d8", "cctest", "unittests"]
BUILD_TARGETS_ALL = ["all"]
@@ -231,11 +228,6 @@ class Config(object):
arch_specific = self.GetTargetCpu() + self.GetV8TargetCpu()
return template % arch_specific
- def WantsGoma(self):
- output = _CallWithOutputNoTerminal(
- "gn args --short --list=use_goma %s" % (GetPath(self.arch, self.mode)))
- return "true" in output
-
def Build(self):
path = GetPath(self.arch, self.mode)
args_gn = os.path.join(path, "args.gn")
@@ -247,14 +239,13 @@ class Config(object):
code = _Call("gn gen %s" % path)
if code != 0: return code
targets = " ".join(self.targets)
- build_opts = BUILD_OPTS_GOMA if self.WantsGoma() else BUILD_OPTS_DEFAULT
# The implementation of mksnapshot failure detection relies on
# the "pty" module and GDB presence, so skip it on non-Linux.
if "linux" not in sys.platform:
- return _Call("ninja -C %s %s %s" % (path, build_opts, targets))
+ return _Call("autoninja -C %s %s" % (path, targets))
- return_code, output = _CallWithOutput("ninja -C %s %s %s" %
- (path, build_opts, targets))
+ return_code, output = _CallWithOutput("autoninja -C %s %s" %
+ (path, targets))
if return_code != 0 and "FAILED: gen/snapshot.cc" in output:
csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
match = csa_trap.search(output)
diff --git a/deps/v8/tools/gcmole/download_gcmole_tools.py b/deps/v8/tools/gcmole/download_gcmole_tools.py
deleted file mode 100755
index af27723da6..0000000000
--- a/deps/v8/tools/gcmole/download_gcmole_tools.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import re
-import subprocess
-
-GCMOLE_PATH = os.path.dirname(os.path.abspath(__file__))
-SHA1_PATH = os.path.join(GCMOLE_PATH, 'gcmole-tools.tar.gz.sha1')
-
-if re.search(r'\bgcmole=1', os.environ.get('GYP_DEFINES', '')):
- subprocess.check_call([
- 'download_from_google_storage',
- '-b', 'chrome-v8-gcmole',
- '-u', '--no_resume',
- '-s', SHA1_PATH,
- '--platform=linux*'
- ])
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index 862b7b0247..d832041361 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -181,34 +181,6 @@ function InvokeClangPluginForEachFile(filenames, cfg, func)
end
-------------------------------------------------------------------------------
--- GYP file parsing
-
--- TODO(machenbach): Remove this when deprecating gyp.
-local function ParseGYPFile()
- local result = {}
- local gyp_files = {
- { "src/v8.gyp", "'([^']-%.cc)'", "src/" },
- { "test/cctest/cctest.gyp", "'(test-[^']-%.cc)'", "test/cctest/" }
- }
-
- for i = 1, #gyp_files do
- local filename = gyp_files[i][1]
- local pattern = gyp_files[i][2]
- local prefix = gyp_files[i][3]
- local gyp_file = assert(io.open(filename), "failed to open GYP file")
- local gyp = gyp_file:read('*a')
- for condition, sources in
- gyp:gmatch "%[.-### gcmole%((.-)%) ###(.-)%]" do
- if result[condition] == nil then result[condition] = {} end
- for file in sources:gmatch(pattern) do
- table.insert(result[condition], prefix .. file)
- end
- end
- gyp_file:close()
- end
-
- return result
-end
local function ParseGNFile()
local result = {}
@@ -258,34 +230,8 @@ local function BuildFileList(sources, props)
end
-local gyp_sources = ParseGYPFile()
local gn_sources = ParseGNFile()
--- TODO(machenbach): Remove this comparison logic when deprecating gyp.
-local function CompareSources(sources1, sources2, what)
- for condition, files1 in pairs(sources1) do
- local files2 = sources2[condition]
- assert(
- files2 ~= nil,
- "Missing gcmole condition in " .. what .. ": " .. condition)
-
- -- Turn into set for speed.
- files2_set = {}
- for i, file in pairs(files2) do files2_set[file] = true end
-
- for i, file in pairs(files1) do
- assert(
- files2_set[file] ~= nil,
- "Missing file " .. file .. " in " .. what .. " for condition " ..
- condition)
- end
- end
-end
-
-CompareSources(gyp_sources, gn_sources, "GN")
-CompareSources(gn_sources, gyp_sources, "GYP")
-
-
local function FilesForArch(arch)
return BuildFileList(gn_sources, { os = 'linux',
arch = arch,
diff --git a/deps/v8/tools/gcmole/run_gcmole.gyp b/deps/v8/tools/gcmole/run_gcmole.gyp
deleted file mode 100644
index 7d206bf412..0000000000
--- a/deps/v8/tools/gcmole/run_gcmole.gyp
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'run_gcmole_run',
- 'type': 'none',
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'run-gcmole.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/gcov.sh b/deps/v8/tools/gcov.sh
deleted file mode 100755
index d7277043d4..0000000000
--- a/deps/v8/tools/gcov.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Build and collect code coverage data, cummulatively, on specified architectures.
-
-BUILD_TYPE=${BUILD_TYPE:-Release}
-
-declare -A modes=( [Release]=release [Debug]=debug )
-declare -A pairs=( [arm]=ia32 [arm64]=x64 [ia32]=ia32 [x64]=x64 )
-
-if [ -z ${modes[$BUILD_TYPE]} ]
-then
- echo "BUILD_TYPE must be {<unspecified>|Release|Debug}"
- echo "Release is default"
- exit
-fi
-
-mode=${modes[$BUILD_TYPE]}
-
-echo "Using build:" $BUILD_TYPE
-v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
-work_dir=$v8_root/cov
-build_dir=$work_dir/$BUILD_TYPE
-
-if [ -z $@ ]
-then
- echo "Pass at least one target architecture"
- echo "Supported architectures: x64 ia32 arm arm64"
- echo ""
- echo "Example: ./tools/gcov.sh x64 arm"
- echo ""
- echo "Optionally, set BUILD_TYPE env variable to"
- echo "either Debug or Release, to use the corresponding build."
- echo "By default, BUILD_TYPE is Release."
- echo ""
- echo "Example: BUILD_TYPE=Debug ./tools/gcov.sh x64 arm"
- echo ""
- exit
-fi
-
-lcov --directory=$build_dir --zerocounters
-
-# Mapping v8 build terminology to gnu compiler terminology:
-# target_arch is the host, and
-# v8_target_arch is the target
-
-for v8_target_arch in "$@"
-do
- target_arch=${pairs[$v8_target_arch]}
- if [ -z $target_arch ]
- then
- echo "Skipping unknown architecture: " $v8_target_arch
- else
- echo "Building" $v8_target_arch
- GYP_DEFINES="component=static_library use_goma=1 target_arch=$target_arch v8_target_arch=$v8_target_arch coverage=1 clang=0" python $v8_root/gypfiles/gyp_v8.py -G output_dir=$work_dir
- ninja -C $build_dir -j2000
- $v8_root/tools/run-tests.py --gcov-coverage --arch=$v8_target_arch --mode=$mode --shell-dir=$build_dir --variants=exhaustive
- fi
-done
-
-lcov --directory=$build_dir --capture --output-file $work_dir/app.info
-genhtml --output-directory $work_dir/html $work_dir/app.info
-echo "Done"
-echo "Output available at: " $work_dir/html/index.html
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 043ecc306d..b12809739a 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -226,6 +226,11 @@ consts_misc = [
'value': 'NumberDictionaryShape::kPrefixSize' },
{ 'name': 'numberdictionaryshape_entry_size',
'value': 'NumberDictionaryShape::kEntrySize' },
+
+ { 'name': 'simplenumberdictionaryshape_prefix_size',
+ 'value': 'SimpleNumberDictionaryShape::kPrefixSize' },
+ { 'name': 'simplenumberdictionaryshape_entry_size',
+ 'value': 'SimpleNumberDictionaryShape::kEntrySize' },
];
#
diff --git a/deps/v8/tools/get_landmines.py b/deps/v8/tools/get_landmines.py
new file mode 100755
index 0000000000..ff4831dff5
--- /dev/null
+++ b/deps/v8/tools/get_landmines.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This file emits the list of reasons why a particular build needs to be clobbered
+(or a list of 'landmines').
+"""
+
+import sys
+
+
+def print_landmines(): # pylint: disable=invalid-name
+ """
+ ALL LANDMINES ARE EMITTED FROM HERE.
+ """
+ # DO NOT add landmines as part of a regular CL. Landmines are a last-effort
+ # bandaid fix if a CL that got landed has a build dependency bug and all bots
+ # need to be cleaned up. If you're writing a new CL that causes build
+ # dependency problems, fix the dependency problems instead of adding a
+ # landmine.
+ # See the Chromium version in src/build/get_landmines.py for usage examples.
+ print 'Need to clobber after ICU52 roll.'
+ print 'Landmines test.'
+ print 'Activating MSVS 2013.'
+ print 'Revert activation of MSVS 2013.'
+ print 'Activating MSVS 2013 again.'
+ print 'Clobber after ICU roll.'
+ print 'Moar clobbering...'
+ print 'Remove build/android.gypi'
+ print 'Cleanup after windows ninja switch attempt.'
+ print 'Switching to pinned msvs toolchain.'
+ print 'Clobbering to hopefully resolve problem with mksnapshot'
+ print 'Clobber after ICU roll.'
+ print 'Clobber after Android NDK update.'
+ print 'Clober to fix windows build problems.'
+ print 'Clober again to fix windows build problems.'
+ print 'Clobber to possibly resolve failure on win-32 bot.'
+ print 'Clobber for http://crbug.com/668958.'
+ return 0
+
+
+def main():
+ print_landmines()
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
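The script only emits the reasons; the clobber decision is made by a separate landmines checker (the Chromium version referenced in the comment above). A hypothetical sketch of that consuming side, assuming the previous run's list is cached in the output directory:

    import os

    def clobber_needed(build_dir, new_landmines):
        # Hypothetical consumer: any change in the emitted landmine list
        # (an added, removed, or edited reason) invalidates the build dir.
        cache = os.path.join(build_dir, '.landmines')
        old = None
        if os.path.exists(cache):
            with open(cache) as f:
                old = f.read().splitlines()
        with open(cache, 'w') as f:
            f.write('\n'.join(new_landmines))
        return old is not None and old != new_landmines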
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 5d9ffff607..570ef92118 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -185,6 +185,10 @@ MAGIC_MARKER_PAIRS = (
(0xbbbbbbbb, 0xbbbbbbbb),
(0xfefefefe, 0xfefefeff),
)
+# See StackTraceFailureMessage in isolate.h
+STACK_TRACE_MARKER = 0xdecade30
+# See FailureMessage in logging.cc
+ERROR_MESSAGE_MARKER = 0xdecade10
# Set of structures and constants that describe the layout of minidump
# files. Based on MSDN and Google Breakpad.
@@ -2105,11 +2109,9 @@ class InspectionPadawan(object):
"""
# Only look at the first 1k words on the stack
ptr_size = self.reader.PointerSize()
- if start is None:
- start = self.reader.ExceptionSP()
+ if start is None: start = self.reader.ExceptionSP()
if not self.reader.IsValidAddress(start): return start
end = start + ptr_size * 1024 * 4
- message_start = 0
magic1 = None
for slot in xrange(start, end, ptr_size):
if not self.reader.IsValidAddress(slot + ptr_size): break
@@ -2117,10 +2119,64 @@ class InspectionPadawan(object):
magic2 = self.reader.ReadUIntPtr(slot + ptr_size)
pair = (magic1 & 0xFFFFFFFF, magic2 & 0xFFFFFFFF)
if pair in MAGIC_MARKER_PAIRS:
- message_slot = slot + ptr_size * 4
- message_start = self.reader.ReadUIntPtr(message_slot)
- break
- if message_start == 0:
+ return self.TryExtractOldStyleStackTrace(slot, start, end,
+ print_message)
+ if pair[0] == STACK_TRACE_MARKER:
+ return self.TryExtractStackTrace(slot, start, end, print_message)
+ elif pair[0] == ERROR_MESSAGE_MARKER:
+ return self.TryExtractErrorMessage(slot, start, end, print_message)
+ # Simple fallback in case no stack trace object was found
+ return self.TryExtractOldStyleStackTrace(0, start, end,
+ print_message)
+
+ def TryExtractStackTrace(self, slot, start, end, print_message):
+ ptr_size = self.reader.PointerSize()
+ assert self.reader.ReadUIntPtr(slot) & 0xFFFFFFFF == STACK_TRACE_MARKER
+ end_marker = STACK_TRACE_MARKER + 1;
+ header_size = 10
+ # Look for the end marker after the fields and the message buffer.
+ end_search = start + (32 * 1024) + (header_size * ptr_size);
+ end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
+ if not end_slot: return start
+ print "Stack Message (start=%s):" % self.heap.FormatIntPtr(slot)
+ slot += ptr_size
+ for name in ("isolate","ptr1", "ptr2", "ptr3", "ptr4", "codeObject1",
+ "codeObject2", "codeObject3", "codeObject4"):
+ value = self.reader.ReadUIntPtr(slot)
+ print " %s: %s" % (name.rjust(14), self.heap.FormatIntPtr(value))
+ slot += ptr_size
+ print " message start: %s" % self.heap.FormatIntPtr(slot)
+ stack_start = end_slot + ptr_size
+ print " stack_start: %s" % self.heap.FormatIntPtr(stack_start)
+ (message_start, message) = self.FindFirstAsciiString(slot)
+ self.FormatStackTrace(message, print_message)
+ return stack_start
+
+ def FindPtr(self, expected_value, start, end):
+ ptr_size = self.reader.PointerSize()
+ for slot in xrange(start, end, ptr_size):
+ if not self.reader.IsValidAddress(slot): return None
+ value = self.reader.ReadUIntPtr(slot)
+ if value == expected_value: return slot
+ return None
+
+ def TryExtractErrorMessage(self, slot, start, end, print_message):
+ end_marker = ERROR_MESSAGE_MARKER + 1;
+ header_size = 1
+ end_search = start + 1024 + (header_size * ptr_size);
+ end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
+ if not end_slot: return start
+ print "Error Message (start=%s):" % self.heap.FormatIntPtr(slot)
+ slot += ptr_size
+ (message_start, message) = self.FindFirstAsciiString(slot)
+ self.FormatStackTrace(message, print_message)
+ stack_start = end_slot + ptr_size
+ return stack_start
+
+ def TryExtractOldStyleStackTrace(self, message_slot, start, end,
+ print_message):
+ ptr_size = self.reader.PointerSize()
+ if message_slot == 0:
"""
On Mac we don't always get proper magic markers, so just try printing
the first long ascii string found on the stack.
@@ -2130,6 +2186,7 @@ class InspectionPadawan(object):
message_start, message = self.FindFirstAsciiString(start, end, 128)
if message_start is None: return start
else:
+ message_start = self.reader.ReadUIntPtr(message_slot + ptr_size * 4)
message = self.reader.ReadAsciiString(message_start)
stack_start = message_start + len(message) + 1
# Make sure the address is word aligned
@@ -2149,10 +2206,15 @@ class InspectionPadawan(object):
print " message start: %s" % self.heap.FormatIntPtr(message_start)
print " stack_start: %s" % self.heap.FormatIntPtr(stack_start )
print ""
+ self.FormatStackTrace(message, print_message)
+ return stack_start
+
+ def FormatStackTrace(self, message, print_message):
if not print_message:
print " Use `dsa` to print the message with annotated addresses."
print ""
- return stack_start
+ return
+ ptr_size = self.reader.PointerSize()
# Annotate all addresses in the dumped message
prog = re.compile("[0-9a-fA-F]{%s}" % ptr_size*2)
addresses = list(set(prog.findall(message)))
@@ -2166,7 +2228,7 @@ class InspectionPadawan(object):
print message
print "="*80
print ""
- return stack_start
+
def TryInferFramePointer(self, slot, address):
""" Assume we have a framepointer if we find 4 consecutive links """
diff --git a/deps/v8/tools/gyp_flag_compare.py b/deps/v8/tools/gyp_flag_compare.py
deleted file mode 100755
index 86fa5c4098..0000000000
--- a/deps/v8/tools/gyp_flag_compare.py
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Given the output of -t commands from a ninja build for a gyp and GN generated
-build, report on differences between the command lines."""
-
-
-import os
-import shlex
-import subprocess
-import sys
-
-
-# Must be in v8/.
-BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-os.chdir(BASE_DIR)
-
-
-g_total_differences = 0
-
-
-def FindAndRemoveArgWithValue(command_line, argname):
- """Given a command line as a list, remove and return the value of an option
- that takes a value as a separate entry.
-
- Modifies |command_line| in place.
- """
- if argname not in command_line:
- return ''
- location = command_line.index(argname)
- value = command_line[location + 1]
- command_line[location:location + 2] = []
- return value
-
-
-def MergeSpacedArgs(command_line, argname):
- """Combine all arguments |argname| with their values, separated by a space."""
- i = 0
- result = []
- while i < len(command_line):
- arg = command_line[i]
- if arg == argname:
- result.append(arg + ' ' + command_line[i + 1])
- i += 1
- else:
- result.append(arg)
- i += 1
- return result
-
-
-def NormalizeSymbolArguments(command_line):
- """Normalize -g arguments.
-
- If there's no -g args, it's equivalent to -g0. -g2 is equivalent to -g.
- Modifies |command_line| in place.
- """
- # Strip -g0 if there's no symbols.
- have_some_symbols = False
- for x in command_line:
- if x.startswith('-g') and x != '-g0':
- have_some_symbols = True
- if not have_some_symbols and '-g0' in command_line:
- command_line.remove('-g0')
-
- # Rename -g2 to -g.
- if '-g2' in command_line:
- command_line[command_line.index('-g2')] = '-g'
-
-
-def GetFlags(lines, build_dir):
- """Turn a list of command lines into a semi-structured dict."""
- is_win = sys.platform == 'win32'
- flags_by_output = {}
- for line in lines:
- command_line = shlex.split(line.strip(), posix=not is_win)[1:]
-
- output_name = FindAndRemoveArgWithValue(command_line, '-o')
- dep_name = FindAndRemoveArgWithValue(command_line, '-MF')
-
- NormalizeSymbolArguments(command_line)
-
- command_line = MergeSpacedArgs(command_line, '-Xclang')
-
- cc_file = [x for x in command_line if x.endswith('.cc') or
- x.endswith('.c') or
- x.endswith('.cpp')]
- if len(cc_file) != 1:
- print 'Skipping %s' % command_line
- continue
- assert len(cc_file) == 1
-
- if is_win:
- rsp_file = [x for x in command_line if x.endswith('.rsp')]
- assert len(rsp_file) <= 1
- if rsp_file:
- rsp_file = os.path.join(build_dir, rsp_file[0][1:])
- with open(rsp_file, "r") as open_rsp_file:
- command_line = shlex.split(open_rsp_file, posix=False)
-
- defines = [x for x in command_line if x.startswith('-D')]
- include_dirs = [x for x in command_line if x.startswith('-I')]
- dash_f = [x for x in command_line if x.startswith('-f')]
- warnings = \
- [x for x in command_line if x.startswith('/wd' if is_win else '-W')]
- others = [x for x in command_line if x not in defines and \
- x not in include_dirs and \
- x not in dash_f and \
- x not in warnings and \
- x not in cc_file]
-
- for index, value in enumerate(include_dirs):
- if value == '-Igen':
- continue
- path = value[2:]
- if not os.path.isabs(path):
- path = os.path.join(build_dir, path)
- include_dirs[index] = '-I' + os.path.normpath(path)
-
- # GYP supports paths above the source root like <(DEPTH)/../foo while such
- # paths are unsupported by gn. But gn allows to use system-absolute paths
- # instead (paths that start with single '/'). Normalize all paths.
- cc_file = [os.path.normpath(os.path.join(build_dir, cc_file[0]))]
-
- # Filter for libFindBadConstructs.so having a relative path in one and
- # absolute path in the other.
- others_filtered = []
- for x in others:
- if x.startswith('-Xclang ') and x.endswith('libFindBadConstructs.so'):
- others_filtered.append(
- '-Xclang ' +
- os.path.join(os.getcwd(),
- os.path.normpath(
- os.path.join('out/gn_flags', x.split(' ', 1)[1]))))
- elif x.startswith('-B'):
- others_filtered.append(
- '-B' +
- os.path.join(os.getcwd(),
- os.path.normpath(os.path.join('out/gn_flags', x[2:]))))
- else:
- others_filtered.append(x)
- others = others_filtered
-
- flags_by_output[cc_file[0]] = {
- 'output': output_name,
- 'depname': dep_name,
- 'defines': sorted(defines),
- 'include_dirs': sorted(include_dirs), # TODO(scottmg): This is wrong.
- 'dash_f': sorted(dash_f),
- 'warnings': sorted(warnings),
- 'other': sorted(others),
- }
- return flags_by_output
-
-
-def CompareLists(gyp, gn, name, dont_care_gyp=None, dont_care_gn=None):
- """Return a report of any differences between gyp and gn lists, ignoring
- anything in |dont_care_{gyp|gn}| respectively."""
- global g_total_differences
- if not dont_care_gyp:
- dont_care_gyp = []
- if not dont_care_gn:
- dont_care_gn = []
- output = ''
- if gyp[name] != gn[name]:
- gyp_set = set(gyp[name])
- gn_set = set(gn[name])
- missing_in_gyp = gyp_set - gn_set
- missing_in_gn = gn_set - gyp_set
- missing_in_gyp -= set(dont_care_gyp)
- missing_in_gn -= set(dont_care_gn)
- if missing_in_gyp or missing_in_gn:
- output += ' %s differ:\n' % name
- if missing_in_gyp:
- output += ' In gyp, but not in GN:\n %s' % '\n '.join(
- sorted(missing_in_gyp)) + '\n'
- g_total_differences += len(missing_in_gyp)
- if missing_in_gn:
- output += ' In GN, but not in gyp:\n %s' % '\n '.join(
- sorted(missing_in_gn)) + '\n\n'
- g_total_differences += len(missing_in_gn)
- return output
-
-
-def Run(command_line):
- """Run |command_line| as a subprocess and return stdout. Raises on error."""
- try:
- return subprocess.check_output(command_line, shell=True)
- except subprocess.CalledProcessError as e:
- # Rescue the output we got until the exception happened.
- print '#### Stdout: ####################################################'
- print e.output
- print '#################################################################'
- raise
-
-
-def main():
- if len(sys.argv) < 4:
- print ('usage: %s gn_outdir gyp_outdir gn_target '
- '[gyp_target1, gyp_target2, ...]' % __file__)
- return 1
-
- if len(sys.argv) == 4:
- sys.argv.append(sys.argv[3])
- gn_out_dir = sys.argv[1]
- print >> sys.stderr, 'Expecting gn outdir in %s...' % gn_out_dir
- gn = Run('ninja -C %s -t commands %s' % (gn_out_dir, sys.argv[3]))
- if sys.platform == 'win32':
- # On Windows flags are stored in .rsp files which are created during build.
- print >> sys.stderr, 'Building in %s...' % gn_out_dir
- Run('ninja -C %s -d keeprsp %s' % (gn_out_dir, sys.argv[3]))
-
- gyp_out_dir = sys.argv[2]
- print >> sys.stderr, 'Expecting gyp outdir in %s...' % gyp_out_dir
- gyp = Run('ninja -C %s -t commands %s' % (gyp_out_dir, " ".join(sys.argv[4:])))
- if sys.platform == 'win32':
- # On Windows flags are stored in .rsp files which are created during build.
- print >> sys.stderr, 'Building in %s...' % gyp_out_dir
- Run('ninja -C %s -d keeprsp %s' % (gyp_out_dir, " ".join(sys.argv[4:])))
-
- all_gyp_flags = GetFlags(gyp.splitlines(),
- os.path.join(os.getcwd(), gyp_out_dir))
- all_gn_flags = GetFlags(gn.splitlines(),
- os.path.join(os.getcwd(), gn_out_dir))
- gyp_files = set(all_gyp_flags.keys())
- gn_files = set(all_gn_flags.keys())
- different_source_list = gyp_files != gn_files
- if different_source_list:
- print 'Different set of sources files:'
- print ' In gyp, not in GN:\n %s' % '\n '.join(
- sorted(gyp_files - gn_files))
- print ' In GN, not in gyp:\n %s' % '\n '.join(
- sorted(gn_files - gyp_files))
- print '\nNote that flags will only be compared for files in both sets.\n'
- file_list = gyp_files & gn_files
- files_with_given_differences = {}
- for filename in sorted(file_list):
- gyp_flags = all_gyp_flags[filename]
- gn_flags = all_gn_flags[filename]
- differences = CompareLists(gyp_flags, gn_flags, 'dash_f')
- differences += CompareLists(gyp_flags, gn_flags, 'defines')
- differences += CompareLists(gyp_flags, gn_flags, 'include_dirs',
- ['-I%s' % os.path.dirname(BASE_DIR)])
- differences += CompareLists(gyp_flags, gn_flags, 'warnings',
- # More conservative warnings in GN we consider to be OK.
- dont_care_gyp=[
- '/wd4091', # 'keyword' : ignored on left of 'type' when no variable
- # is declared.
- '/wd4456', # Declaration hides previous local declaration.
- '/wd4457', # Declaration hides function parameter.
- '/wd4458', # Declaration hides class member.
- '/wd4459', # Declaration hides global declaration.
- '/wd4702', # Unreachable code.
- '/wd4800', # Forcing value to bool 'true' or 'false'.
- '/wd4838', # Conversion from 'type' to 'type' requires a narrowing
- # conversion.
- ] if sys.platform == 'win32' else None,
- dont_care_gn=[
- '-Wendif-labels',
- '-Wextra',
- '-Wsign-compare',
- ] if not sys.platform == 'win32' else None)
- differences += CompareLists(gyp_flags, gn_flags, 'other')
- if differences:
- files_with_given_differences.setdefault(differences, []).append(filename)
-
- for diff, files in files_with_given_differences.iteritems():
- print '\n'.join(sorted(files))
- print diff
-
- print 'Total differences:', g_total_differences
- # TODO(scottmg): Return failure on difference once we're closer to identical.
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/deps/v8/tools/heap-stats/README.md b/deps/v8/tools/heap-stats/README.md
index 70083fe257..9cf6e5673c 100644
--- a/deps/v8/tools/heap-stats/README.md
+++ b/deps/v8/tools/heap-stats/README.md
@@ -6,8 +6,9 @@ maintaining internal state versus actually allocated by the user.
The tool consumes log files produced by d8 (or Chromium) by passing
`--trace-gc-object-stats` or a trace captured using Chrome's tracing
-infrastructure. Chrome trace files need to be unpacked before they can
-be used though.
+infrastructure. Chrome trace files can either be processed as gzip or raw text
+files.
+
Hosting requires a web server, e.g.:
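A minimal sketch of that gzip-or-raw handling on the consumer side (Python, purely illustrative; the actual logic lives in trace-file-reader.js):

    import gzip
    import json

    def load_trace(path):
        # Sniff the gzip magic bytes rather than trusting the extension;
        # both gzipped and plain-text Chrome traces are accepted.
        with open(path, 'rb') as f:
            magic = f.read(2)
        opener = gzip.open if magic == b'\x1f\x8b' else open
        with opener(path, 'rt') as f:
            return json.load(f)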
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index 0a836d5f6c..16a4b53e49 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -6,15 +6,10 @@
const CATEGORIES = new Map([
[
'user', new Set([
- '*FIXED_ARRAY_CONTEXT_SUB_TYPE',
- '*FIXED_ARRAY_COPY_ON_WRITE_SUB_TYPE',
- '*FIXED_ARRAY_DICTIONARY_PROPERTIES_SUB_TYPE',
- '*FIXED_ARRAY_JS_COLLECTION_SUB_TYPE',
- '*FIXED_ARRAY_JS_WEAK_COLLECTION_SUB_TYPE',
- '*FIXED_ARRAY_PACKED_ELEMENTS_SUB_TYPE',
'CONS_ONE_BYTE_STRING_TYPE',
'CONS_STRING_TYPE',
'DESCRIPTOR_ARRAY_TYPE',
+ 'ELEMENTS_TYPE',
'EXTERNAL_INTERNALIZED_STRING_TYPE',
'EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
'EXTERNAL_ONE_BYTE_STRING_TYPE',
@@ -30,6 +25,9 @@ const CATEGORIES = new Map([
'FIXED_UINT32_ARRAY_TYPE',
'FIXED_UINT8_ARRAY_TYPE',
'FIXED_UINT8_CLAMPED_ARRAY_TYPE',
+ 'FUNCTION_CONTEXT_TYPE',
+ 'GLOBAL_ELEMENTS_TYPE',
+ 'GLOBAL_PROPERTIES_TYPE',
'HEAP_NUMBER_TYPE',
'INTERNALIZED_STRING_TYPE',
'JS_ARGUMENTS_TYPE',
@@ -56,13 +54,17 @@ const CATEGORIES = new Map([
'JS_PROMISE_TYPE',
'JS_REGEXP_TYPE',
'JS_SET_TYPE',
+ 'JS_SET_VALUE_ITERATOR_TYPE',
'JS_STRING_ITERATOR_TYPE',
+ 'JS_TO_WASM_FUNCTION',
'JS_TYPED_ARRAY_TYPE',
'JS_VALUE_TYPE',
'JS_WEAK_MAP_TYPE',
'MUTABLE_HEAP_NUMBER_TYPE',
+ 'NATIVE_CONTEXT_TYPE',
'ONE_BYTE_INTERNALIZED_STRING_TYPE',
'ONE_BYTE_STRING_TYPE',
+ 'OBJECT_PROPERTY_DICTIONARY_TYPE',
'PROPERTY_ARRAY_TYPE',
'SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE',
'SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
@@ -74,6 +76,9 @@ const CATEGORIES = new Map([
'SYMBOL_TYPE',
'THIN_ONE_BYTE_STRING_TYPE',
'THIN_STRING_TYPE',
+ 'WASM_INSTANCE_TYPE',
+ 'WASM_MEMORY_TYPE',
+ 'WASM_MODULE_TYPE',
])
],
[
@@ -84,26 +89,16 @@ const CATEGORIES = new Map([
'ALLOCATION_MEMENTO_TYPE',
'ALLOCATION_SITE_TYPE',
'BOILERPLATE_ELEMENTS_TYPE',
- 'BOILERPLATE_NAME_DICTIONARY_TYPE',
'BOILERPLATE_PROPERTY_ARRAY_TYPE',
+ 'BOILERPLATE_PROPERTY_DICTIONARY_TYPE',
'BYTE_ARRAY_TYPE',
'CELL_TYPE',
+ 'CODE_STUBS_TABLE_TYPE',
'CONTEXT_EXTENSION_TYPE',
- '*FIXED_ARRAY_DEPENDENT_CODE_SUB_TYPE',
- '*FIXED_ARRAY_ENUM_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_ENUM_INDICES_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_NUMBER_STRING_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_PROTOTYPE_USERS_SUB_TYPE',
- '*FIXED_ARRAY_REGEXP_MULTIPLE_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_RETAINED_MAPS_SUB_TYPE',
- '*FIXED_ARRAY_SCOPE_INFO_SUB_TYPE',
- '*FIXED_ARRAY_SCRIPT_LIST_SUB_TYPE',
- '*FIXED_ARRAY_SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_STRING_SPLIT_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_TEMPLATE_INFO_SUB_TYPE',
- '*FIXED_ARRAY_WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE',
+ 'ENUM_CACHE_TYPE',
+ 'ENUM_INDICES_CACHE_TYPE',
'FOREIGN_TYPE',
+ 'FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE',
'FUNCTION_TEMPLATE_INFO_TYPE',
'INTERCEPTOR_INFO_TYPE',
'JS_API_OBJECT_TYPE',
@@ -111,41 +106,65 @@ const CATEGORIES = new Map([
'JS_OBJECT_BOILERPLATE_TYPE',
'JS_SPECIAL_API_OBJECT_TYPE',
'MAP_TYPE',
+ 'NUMBER_STRING_CACHE_TYPE',
'OBJECT_TEMPLATE_INFO_TYPE',
+ 'OBJECT_TO_CODE_TYPE',
'ODDBALL_TYPE',
'PROMISE_REACTION_JOB_INFO_TYPE',
'PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE',
'PROPERTY_CELL_TYPE',
'PROTOTYPE_INFO_TYPE',
+ 'PROTOTYPE_USERS_TYPE',
+ 'REGEXP_MULTIPLE_CACHE_TYPE',
+ 'RETAINED_MAPS_TYPE',
+ 'SCOPE_INFO_TYPE',
+ 'SCRIPT_LIST_TYPE',
+ 'SCRIPT_SHARED_FUNCTION_INFOS_TYPE',
+ 'SERIALIZED_OBJECTS_TYPE',
+ 'SINGLE_CHARACTER_STRING_CACHE_TYPE',
'STACK_FRAME_INFO_TYPE',
+ 'STRING_SPLIT_CACHE_TYPE',
+ 'STRING_TABLE_TYPE',
'TRANSITION_ARRAY_TYPE',
'WEAK_CELL_TYPE',
+ 'WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE',
])
],
[
'code', new Set([
- '*CODE_BUILTIN',
- '*CODE_BYTECODE_HANDLER',
- '*CODE_OPTIMIZED_FUNCTION',
- '*CODE_REGEXP',
- '*CODE_STUB',
- '*FIXED_ARRAY_BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE',
- '*FIXED_ARRAY_BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE',
- '*FIXED_ARRAY_CODE_STUBS_TABLE_SUB_TYPE',
- '*FIXED_ARRAY_COMPILATION_CACHE_TABLE_SUB_TYPE',
- '*FIXED_ARRAY_DEOPTIMIZATION_DATA_SUB_TYPE',
- '*FIXED_ARRAY_EMBEDDED_OBJECT_SUB_TYPE',
- '*FIXED_ARRAY_HANDLER_TABLE_SUB_TYPE',
- '*FIXED_ARRAY_NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE',
- '*FIXED_ARRAY_OPTIMIZED_CODE_LITERALS_SUB_TYPE',
- '*FIXED_ARRAY_SHARED_FUNCTION_INFOS_SUB_TYPE',
+ 'BUILTIN',
+ 'BYTECODE_HANDLER',
+ 'OPTIMIZED_FUNCTION',
+ 'REGEXP',
+ 'STUB',
+ 'BYTECODE_ARRAY_CONSTANT_POOL_TYPE',
+ 'BYTECODE_ARRAY_HANDLER_TABLE_TYPE',
'BYTECODE_ARRAY_TYPE',
'CODE_DATA_CONTAINER_TYPE',
+ 'DEOPTIMIZATION_DATA_TYPE',
+ 'EMBEDDED_OBJECT_TYPE',
+ 'FEEDBACK_METADATA_TYPE',
+ 'FEEDBACK_VECTOR_HEADER_TYPE',
+ 'FEEDBACK_VECTOR_ENTRY_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_CALL_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_ENUM_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_LOAD_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_OTHER_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_STORE_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE',
'FEEDBACK_VECTOR_TYPE',
'LOAD_HANDLER_TYPE',
+ 'NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE',
+ 'OPTIMIZED_CODE_LITERALS_TYPE',
+ 'SCRIPT_SOURCE_EXTERNAL_TYPE',
+ 'SCRIPT_SOURCE_NON_EXTERNAL_TYPE',
'SCRIPT_TYPE',
'SHARED_FUNCTION_INFO_TYPE',
'STORE_HANDLER_TYPE',
+ 'UNCOMPILED_JS_FUNCTION_TYPE',
+ 'UNCOMPILED_SHARED_FUNCTION_INFO_TYPE',
])
],
['unclassified', new Set()],
@@ -158,10 +177,3 @@ const CATEGORY_NAMES = new Map([
['code', 'Code'],
['unclassified', 'Unclassified'],
]);
-
-// Instance types that are constructed from their sub types and
-// should thus be hidden.
-const IGNORED_INSTANCE_TYPES = new Set([
- 'FIXED_ARRAY_TYPE',
- 'CODE_TYPE',
-]);
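
The rewritten category tables drop the synthetic `*FIXED_ARRAY_*`/`*CODE_*` sub-type markers in favor of first-class instance type names, so classifying a type is a plain scan over the `Map` of `Set`s. A minimal sketch of that lookup, mirroring `categoryForType()` in details-selection.js below; the two entries are a tiny sample, not the full table:

```js
// Sample subset of the CATEGORIES table; the real Map holds many more types.
const CATEGORIES = new Map([
  ['objects', new Set(['ODDBALL_TYPE', 'MAP_TYPE'])],
  ['code', new Set(['BYTECODE_ARRAY_TYPE', 'FEEDBACK_VECTOR_TYPE'])],
]);

// Scan each category's Set until one contains the instance type.
function categoryForType(instanceType) {
  for (const [category, types] of CATEGORIES) {
    if (types.has(instanceType)) return category;
  }
  return 'unclassified';
}

console.log(categoryForType('MAP_TYPE'));          // objects
console.log(categoryForType('WASM_MODULE_TYPE'));  // unclassified (sample only)
```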
diff --git a/deps/v8/tools/heap-stats/details-selection.html b/deps/v8/tools/heap-stats/details-selection.html
index d60aef9669..4680e8e4a1 100644
--- a/deps/v8/tools/heap-stats/details-selection.html
+++ b/deps/v8/tools/heap-stats/details-selection.html
@@ -3,6 +3,10 @@ Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. -->
<template id="details-selection-template">
<style>
+#dataSelectionSection {
+ display: none;
+}
+
.box {
border-left: dashed 1px #666666;
border-right: dashed 1px #666666;
@@ -20,9 +24,20 @@ found in the LICENSE file. -->
border-radius: 0px 0px 5px 5px;
}
-span {
- display: block;
- padding: 5px;
+.box > ul {
+ margin: 0px;
+ padding: 0px;
+}
+
+.box > ul > li {
+ display: inline-block;
+}
+
+.box > ul > li:not(:first-child) {
+ margin-left: 10px;
+}
+
+.box > ul > li:first-child {
font-weight: bold;
}
@@ -38,35 +53,58 @@ span {
#categories {
margin-top: 10px;
}
-</style>
-<h2>Data selection</h2>
-<ul>
- <li>
- <label for="isolate-select">
- Isolate
- </label>
- <select id="isolate-select">
- <option>No data</option>
- </select>
- </li>
- <li>
- <label for="dataset-select">
- Data set
- </label>
- <select id="dataset-select">
- <option>No data</option>
- </select>
- </li>
- <li>
- <input type="checkbox" id="merge-categories" checked=checked />
- <label for="merge-categories">
- Merge categories
- </label>
- </li>
-</ul>
+#category-filter {
+ text-align: right;
+ width: 50px;
+}
+
+</style>
+<section id="dataSelectionSection">
+ <h2>Data selection</h2>
+ <ul>
+ <li>
+ <label for="isolate-select">
+ Isolate
+ </label>
+ <select id="isolate-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <label for="dataset-select">
+ Data set
+ </label>
+ <select id="dataset-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <input type="checkbox" id="merge-categories" checked=checked />
+ <label for="merge-categories">
+ Merge categories
+ </label>
+ </li>
+ <li>
+ <label for="gc-select">
+ Garbage collection (at a specific time in ms)
+ </label>
+ <select id="gc-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <input id="category-filter" type="text" value="0" disabled="disabled" />KB
+ <button id="category-filter-btn" disabled="disabled">Filter categories with less memory</button>
+ </li>
+ <li>
+ <button id="csv-export-btn" disabled="disabled">Export selection as CSV</button>
+ </li>
+ </ul>
-<div id="categories"></div>
+ <div id="categories"></div>
+</section>
</template>
+
<script type="text/javascript" src="categories.js"></script>
-<script type="text/javascript" src="details-selection.js"></script> \ No newline at end of file
+<script type="text/javascript" src="details-selection.js"></script>
diff --git a/deps/v8/tools/heap-stats/details-selection.js b/deps/v8/tools/heap-stats/details-selection.js
index 43c000d3f4..c5117d3165 100644
--- a/deps/v8/tools/heap-stats/details-selection.js
+++ b/deps/v8/tools/heap-stats/details-selection.js
@@ -17,8 +17,14 @@ class DetailsSelection extends HTMLElement {
'change', e => this.handleIsolateChange(e));
this.datasetSelect.addEventListener(
'change', e => this.notifySelectionChanged(e));
+ this.gcSelect.addEventListener(
+ 'change', e => this.notifySelectionChanged(e));
+ this.$('#csv-export-btn')
+ .addEventListener('click', e => this.exportCurrentSelection(e));
this.$('#merge-categories')
.addEventListener('change', e => this.notifySelectionChanged(e));
+ this.$('#category-filter-btn')
+ .addEventListener('click', e => this.filterCurrentSelection(e));
}
connectedCallback() {
@@ -36,19 +42,36 @@ class DetailsSelection extends HTMLElement {
return this._data;
}
+ get selectedData() {
+ console.assert(this.data, 'invalid data');
+ console.assert(this.selection, 'invalid selection');
+ return this.data[this.selection.isolate]
+ .gcs[this.selection.gc][this.selection.data_set];
+ }
+
buildCategory(name) {
const div = document.createElement('div');
div.id = name;
div.classList.add('box');
- const span = document.createElement('span');
- div.appendChild(span);
- span.innerHTML = CATEGORY_NAMES.get(name) + ' ';
+ const ul = document.createElement('ul');
+ div.appendChild(ul);
+ const name_li = document.createElement('li');
+ ul.appendChild(name_li);
+ name_li.innerHTML = CATEGORY_NAMES.get(name);
+ const percent_li = document.createElement('li');
+ ul.appendChild(percent_li);
+ percent_li.innerHTML = '0%';
+ percent_li.id = name + 'PercentContent';
+ const all_li = document.createElement('li');
+ ul.appendChild(all_li);
const all_button = document.createElement('button');
- span.appendChild(all_button);
+ all_li.appendChild(all_button);
all_button.innerHTML = 'All';
all_button.addEventListener('click', e => this.selectCategory(name));
+ const none_li = document.createElement('li');
+ ul.appendChild(none_li);
const none_button = document.createElement('button');
- span.appendChild(none_button);
+ none_li.appendChild(none_button);
none_button.innerHTML = 'None';
none_button.addEventListener('click', e => this.unselectCategory(name));
const innerDiv = document.createElement('div');
@@ -69,17 +92,35 @@ class DetailsSelection extends HTMLElement {
return this.$('#isolate-select');
}
+ get gcSelect() {
+ return this.$('#gc-select');
+ }
+
dataChanged() {
- this.clearUI();
- this.populateSelect('#isolate-select', Object.keys(this.data));
+ this.selection = {categories: {}};
+ this.resetUI(true);
+ this.populateIsolateSelect();
this.handleIsolateChange();
+ this.$('#dataSelectionSection').style.display = 'block';
}
- clearUI() {
- this.selection = {categories: {}};
- removeAllChildren(this.isolateSelect);
+ populateIsolateSelect() {
+ let entries = Object.entries(this.data);
+ // Sort by peak heap memory consumption.
+ entries.sort((a, b) => b[1].peakMemory - a[1].peakMemory);
+ this.populateSelect(
+ '#isolate-select', entries, (key, isolate) => isolate.getLabel());
+ }
+
+ resetUI(resetIsolateSelect) {
+ if (resetIsolateSelect) removeAllChildren(this.isolateSelect);
+
removeAllChildren(this.datasetSelect);
+ removeAllChildren(this.gcSelect);
this.clearCategories();
+ this.$('#csv-export-btn').disabled = 'disabled';
+ this.$('#category-filter-btn').disabled = 'disabled';
+ this.$('#category-filter').disabled = 'disabled';
}
handleIsolateChange(e) {
@@ -88,9 +129,15 @@ class DetailsSelection extends HTMLElement {
this.selection.isolate = null;
return;
}
-
+ this.resetUI(false);
+ this.populateSelect(
+ '#dataset-select',
+ this.data[this.selection.isolate].data_sets.entries(), null, 'live');
this.populateSelect(
- '#dataset-select', this.data[this.selection.isolate].data_sets, 'live');
+ '#gc-select',
+ Object.keys(this.data[this.selection.isolate].gcs)
+ .map(v => [v, this.data[this.selection.isolate].gcs[v].time]),
+ (gc, time) => time + 'ms');
this.populateCategories();
this.notifySelectionChanged();
}
@@ -106,10 +153,53 @@ class DetailsSelection extends HTMLElement {
this.selection.category_names = CATEGORY_NAMES;
this.selection.data_set = this.datasetSelect.value;
this.selection.merge_categories = this.$('#merge-categories').checked;
+ this.selection.gc = this.gcSelect.value;
+ this.$('#csv-export-btn').disabled = false;
+ this.$('#category-filter-btn').disabled = false;
+ this.$('#category-filter').disabled = false;
+ this.updatePercentagesInCategory();
this.dispatchEvent(new CustomEvent(
'change', {bubbles: true, composed: true, detail: this.selection}));
}
+ filterCurrentSelection(e) {
+ const filter_value = this.$('#category-filter').value * KB;
+ if (filter_value === 0) return;
+
+ this.selection.category_names.forEach((_, category) => {
+ for (let checkbox of this.shadowRoot.querySelectorAll(
+ 'input[name=' + category + 'Checkbox]')) {
+ checkbox.checked =
+ this.selectedData.instance_type_data[checkbox.instance_type]
+ .overall > filter_value;
+ }
+ });
+ this.notifySelectionChanged();
+ }
+
+ updatePercentagesInCategory() {
+ const overalls = {};
+ let overall = 0;
+ // Reset all categories.
+ this.selection.category_names.forEach((_, category) => {
+ this.$(`#${category}PercentContent`).innerHTML = '0%';
+ });
+ // Only update categories that have selections.
+ Object.entries(this.selection.categories).forEach(([category, value]) => {
+ overalls[category] =
+ Object.values(value).reduce(
+ (accu, current) =>
+ accu + this.selectedData.instance_type_data[current].overall,
+ 0) /
+ KB;
+ overall += overalls[category];
+ });
+ Object.entries(overalls).forEach(([category, category_overall]) => {
+ this.$(`#${category}PercentContent`).innerHTML =
+ `${(category_overall / overall * 100).toFixed(1)}%`;
+ });
+ }
+
selectedInCategory(category) {
const selected = this.shadowRoot.querySelectorAll(
'input[name=' + category + 'Checkbox]:checked');
@@ -125,17 +215,19 @@ class DetailsSelection extends HTMLElement {
return 'unclassified';
}
- createOption(text) {
+ createOption(value, text) {
const option = document.createElement('option');
- option.value = text;
+ option.value = value;
option.text = text;
return option;
}
- populateSelect(id, iterable, autoselect = null) {
- for (let option_value of iterable) {
- const option = this.createOption(option_value);
- if (autoselect === option_value) {
+ populateSelect(id, iterable, labelFn = null, autoselect = null) {
+ if (labelFn == null) labelFn = e => e;
+ for (let [key, value] of iterable) {
+ const label = labelFn(key, value);
+ const option = this.createOption(key, label);
+ if (autoselect === key) {
option.selected = 'selected';
}
this.$(id).appendChild(option);
@@ -160,7 +252,6 @@ class DetailsSelection extends HTMLElement {
for (let instance_type of this.data[this.selection.isolate]
.non_empty_instance_types) {
- if (IGNORED_INSTANCE_TYPES.has(instance_type)) continue;
const category = this.categoryForType(instance_type);
categories[category].push(instance_type);
}
@@ -198,6 +289,7 @@ class DetailsSelection extends HTMLElement {
input.name = category + 'Checkbox';
input.checked = 'checked';
input.id = instance_type + 'Checkbox';
+ input.instance_type = instance_type;
input.value = instance_type;
input.addEventListener('change', e => this.notifySelectionChanged(e));
const label = document.createElement('label');
@@ -206,6 +298,33 @@ class DetailsSelection extends HTMLElement {
label.htmlFor = instance_type + 'Checkbox';
return div;
}
+
+ exportCurrentSelection(e) {
+ const data = [];
+ const selected_data = this.data[this.selection.isolate]
+ .gcs[this.selection.gc][this.selection.data_set]
+ .instance_type_data;
+ Object.values(this.selection.categories).forEach(instance_types => {
+ instance_types.forEach(instance_type => {
+ data.push([instance_type, selected_data[instance_type].overall / KB]);
+ });
+ });
+ const createInlineContent = arrayOfRows => {
+ const content = arrayOfRows.reduce(
+ (accu, rowAsArray) => {return accu + `${rowAsArray.join(',')}\n`},
+ '');
+ return `data:text/csv;charset=utf-8,${content}`;
+ };
+ const encodedUri = encodeURI(createInlineContent(data));
+ const link = document.createElement('a');
+ link.setAttribute('href', encodedUri);
+ link.setAttribute(
+ 'download',
+ `heap_objects_data_${this.selection.isolate}_${this.selection.gc}.csv`);
+ this.shadowRoot.appendChild(link);
+ link.click();
+ this.shadowRoot.removeChild(link);
+ }
}
customElements.define('details-selection', DetailsSelection);
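
`exportCurrentSelection()` above downloads the current selection by assembling a `data:text/csv` URI and clicking a transient anchor element. A standalone sketch of that pattern; the rows are made-up sample data:

```js
// Build CSV text from [name, kilobytes] rows (invented values).
const rows = [['MAP_TYPE', 123.4], ['ODDBALL_TYPE', 5.6]];
const csv = rows.reduce((accu, row) => accu + `${row.join(',')}\n`, '');

// Wrap it in a data URI and trigger a download via a temporary <a> element,
// the same mechanism the component uses inside its shadow root.
const link = document.createElement('a');
link.setAttribute('href', encodeURI(`data:text/csv;charset=utf-8,${csv}`));
link.setAttribute('download', 'heap_objects_data.csv');
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
```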
diff --git a/deps/v8/tools/heap-stats/global-timeline.js b/deps/v8/tools/heap-stats/global-timeline.js
index 0533f21432..f807d44bae 100644
--- a/deps/v8/tools/heap-stats/global-timeline.js
+++ b/deps/v8/tools/heap-stats/global-timeline.js
@@ -86,7 +86,6 @@ class GlobalTimeline extends HTMLElement {
}
getInstanceTypeData() {
- const categories = Object.keys(this.selection.categories);
const instance_types =
Object.values(this.selection.categories)
.reduce((accu, current) => accu.concat(current), []);
diff --git a/deps/v8/tools/heap-stats/histogram-viewer.html b/deps/v8/tools/heap-stats/histogram-viewer.html
new file mode 100644
index 0000000000..93fe980978
--- /dev/null
+++ b/deps/v8/tools/heap-stats/histogram-viewer.html
@@ -0,0 +1,19 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<template id="histogram-viewer-template">
+<style>
+#chart {
+ width: 100%;
+ height: 800px;
+}
+</style>
+<div id="container" style="display: none;">
+ <h2>Details</h2>
+ <ul>
+ <li><span id="overall"></span></li>
+ </ul>
+ <div id="chart"></div>
+</div>
+</template>
+<script type="text/javascript" src="histogram-viewer.js"></script> \ No newline at end of file
diff --git a/deps/v8/tools/heap-stats/histogram-viewer.js b/deps/v8/tools/heap-stats/histogram-viewer.js
new file mode 100644
index 0000000000..bea1e70800
--- /dev/null
+++ b/deps/v8/tools/heap-stats/histogram-viewer.js
@@ -0,0 +1,152 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+const histogram_viewer_template =
+ document.currentScript.ownerDocument.querySelector(
+ '#histogram-viewer-template');
+
+class HistogramViewer extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.appendChild(histogram_viewer_template.content.cloneNode(true));
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ set data(value) {
+ this._data = value;
+ this.stateChanged();
+ }
+
+ get data() {
+ return this._data;
+ }
+
+ set selection(value) {
+ this._selection = value;
+ this.stateChanged();
+ }
+
+ get selection() {
+ return this._selection;
+ }
+
+ isValid() {
+ return this.data && this.selection;
+ }
+
+ hide() {
+ this.$('#container').style.display = 'none';
+ }
+
+ show() {
+ this.$('#container').style.display = 'block';
+ }
+
+ stateChanged() {
+ if (this.isValid()) {
+ const overall_bytes = (this.selection.merge_categories) ?
+ this.getPropertyForCategory('overall') :
+ this.getPropertyForInstanceTypes('overall');
+ this.$('#overall').innerHTML = `Overall: ${overall_bytes / KB} KB`;
+ this.drawChart();
+ } else {
+ this.hide();
+ }
+ }
+
+ get selectedData() {
+ console.assert(this.data, 'invalid data');
+ console.assert(this.selection, 'invalid selection');
+ return this.data[this.selection.isolate]
+ .gcs[this.selection.gc][this.selection.data_set];
+ }
+
+ get selectedInstanceTypes() {
+ console.assert(this.selection, 'invalid selection');
+ return Object.values(this.selection.categories)
+ .reduce((accu, current) => accu.concat(current), []);
+ }
+
+ getPropertyForCategory(property) {
+ return Object.values(this.selection.categories)
+ .reduce(
+ (outer_accu, instance_types) => outer_accu +
+ instance_types.reduce(
+ (inner_accu, instance_type) => inner_accu +
+ this.selectedData
+ .instance_type_data[instance_type][property],
+ 0),
+ 0);
+ }
+
+ getPropertyForInstanceTypes(property) {
+ return this.selectedInstanceTypes.reduce(
+ (accu, instance_type) => accu +
+ this.selectedData.instance_type_data[instance_type][property],
+ 0);
+ }
+
+ getCategoryData() {
+ const labels = [
+ 'Bucket',
+ ...Object.keys(this.selection.categories)
+ .map(k => this.selection.category_names.get(k))
+ ];
+ const data = this.selectedData.bucket_sizes.map(
+ (bucket_size, index) =>
+ [`<${bucket_size}`,
+ ...Object.values(this.selection.categories)
+ .map(
+ instance_types =>
+ instance_types
+ .map(
+ instance_type =>
+ this.selectedData
+ .instance_type_data[instance_type]
+ .histogram[index])
+ .reduce((accu, current) => accu + current, 0))]);
+ // Adjust last histogram bucket label.
+ data[data.length - 1][0] = 'rest';
+ return [labels, ...data];
+ }
+
+ getInstanceTypeData() {
+ const instance_types = this.selectedInstanceTypes;
+ const labels = ['Bucket', ...instance_types];
+ const data = this.selectedData.bucket_sizes.map(
+ (bucket_size, index) =>
+ [`<${bucket_size}`,
+ ...instance_types.map(
+ instance_type =>
+ this.selectedData.instance_type_data[instance_type]
+ .histogram[index])]);
+ // Adjust last histogram bucket label.
+ data[data.length - 1][0] = 'rest';
+ return [labels, ...data];
+ }
+
+ drawChart() {
+ const chart_data = (this.selection.merge_categories) ?
+ this.getCategoryData() :
+ this.getInstanceTypeData();
+ const data = google.visualization.arrayToDataTable(chart_data);
+ const options = {
+ legend: {position: 'top', maxLines: '1'},
+ chartArea: {width: '85%', height: '85%'},
+ bar: {groupWidth: '80%'},
+ explorer: {},
+ };
+ const chart = new google.visualization.BarChart(this.$('#chart'));
+ this.show();
+ chart.draw(data, options);
+ }
+}
+
+customElements.define('histogram-viewer', HistogramViewer);
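
Both `getCategoryData()` and `getInstanceTypeData()` feed `google.visualization.arrayToDataTable()` the same shape: a header row, then one row per histogram bucket whose first column is the bucket label, with the open-ended last bucket relabeled `rest`. A small sketch with invented counts:

```js
// Per-bucket object counts for two instance types (invented numbers).
const bucket_sizes = [32, 64, 128];
const map_histogram = [10, 4, 1];
const oddball_histogram = [7, 0, 2];

const labels = ['Bucket', 'MAP_TYPE', 'ODDBALL_TYPE'];
const data = bucket_sizes.map(
    (bucket_size, index) =>
        [`<${bucket_size}`, map_histogram[index], oddball_histogram[index]]);
// The last bucket has no upper boundary, hence the label adjustment.
data[data.length - 1][0] = 'rest';
const chart_data = [labels, ...data];
console.log(chart_data);
// [['Bucket', 'MAP_TYPE', 'ODDBALL_TYPE'],
//  ['<32', 10, 7], ['<64', 4, 0], ['rest', 1, 2]]
```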
diff --git a/deps/v8/tools/heap-stats/index.html b/deps/v8/tools/heap-stats/index.html
index 3c2e62b6d0..3762502201 100644
--- a/deps/v8/tools/heap-stats/index.html
+++ b/deps/v8/tools/heap-stats/index.html
@@ -8,15 +8,20 @@ found in the LICENSE file. -->
<head>
<meta charset="UTF-8">
<title>V8 Heap Statistics</title>
- <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet' type='text/css'>
- <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+ <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet'>
+ <script
+ src="https://www.gstatic.com/charts/loader.js"></script>
+ <script
+ src="https://cdnjs.cloudflare.com/ajax/libs/pako/1.0.6/pako_inflate.min.js"
+ integrity="sha256-N1z6ddQzX83fjw8v7uSNe7/MgOmMKdwFUv1+AJMDqNM="
+ crossorigin="anonymous"></script>
<link rel="import" href="details-selection.html">
<link rel="import" href="global-timeline.html">
+ <link rel="import" href="histogram-viewer.html">
<link rel="import" href="trace-file-reader.html">
- <style type="text/css">
-
+ <style>
body {
font-family: 'Roboto', sans-serif;
margin-left: 5%;
@@ -24,11 +29,11 @@ body {
}
</style>
- <script type="text/javascript">
+ <script>
'use strict';
-google.charts.load('current', {'packages':['line', 'corechart']});
+google.charts.load('current', {'packages':['line', 'corechart', 'bar']});
function $(id) { return document.querySelector(id); }
@@ -47,15 +52,16 @@ function globalDataChanged(e) {
state.selection = null;
$('#global-timeline').selection = state.selection;
$('#global-timeline').data = state.data;
- $('#type-details').selection = state.selection;
- $('#type-details').data = state.data;
+ $('#histogram-viewer').selection = state.selection;
+ $('#histogram-viewer').data = state.data;
$('#details-selection').data = state.data;
}
function globalSelectionChangedA(e) {
state.selection = e.detail;
+ console.log(state.selection);
$('#global-timeline').selection = state.selection;
- $('#type-details').selection = state.selection;
+ $('#histogram-viewer').selection = state.selection;
}
</script>
@@ -63,16 +69,20 @@ function globalSelectionChangedA(e) {
<body>
<trace-file-reader onchange="globalDataChanged(event)"></trace-file-reader>
+
+ <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
+ <global-timeline id="global-timeline"></global-timeline>
+ <histogram-viewer id="histogram-viewer"></histogram-viewer>
+
<h1>V8 Heap Statistics</h1>
<p>Visualize object statistics that have been gathered using</p>
<ul>
- <li><code>--trace-gc-object-stats on V8</code></li>
+ <li><code>--trace-gc-object-stats</code> on V8</li>
<li>
<a
href="https://www.chromium.org/developers/how-tos/trace-event-profiling-tool">Chrome's
tracing infrastructure</a> collecting data for the category
- <code>v8.gc_stats</code>. The trace file needs to be unpacked (e.g. using
- <code>gunzip</code>).
+ <code>v8.gc_stats</code>.
</li>
</ul>
<p>
@@ -80,9 +90,6 @@ function globalSelectionChangedA(e) {
requiring <a
href="https://en.wikipedia.org/wiki/Cross-origin_resource_sharing">CORS</a>.
</p>
- <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
- <global-timeline id="global-timeline"></global-timeline>
- <type-details id="type-details"></type-details>
</body>
</html>
diff --git a/deps/v8/tools/heap-stats/model.js b/deps/v8/tools/heap-stats/model.js
new file mode 100644
index 0000000000..1afd10a563
--- /dev/null
+++ b/deps/v8/tools/heap-stats/model.js
@@ -0,0 +1,77 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+class Isolate {
+ constructor(address) {
+ this.address = address;
+ this.start = null;
+ this.end = null;
+ this.samples = Object.create(null);
+ this.non_empty_instance_types = new Set();
+ this.gcs = Object.create(null);
+ this.zonetags = [];
+ this.samples = {zone: {}};
+ this.data_sets = new Set();
+ this.peakMemory = 0;
+ }
+
+ finalize() {
+ Object.values(this.gcs).forEach(gc => this.finalizeGC(gc));
+ }
+
+ getLabel() {
+ let label = `${this.address}: gc=#${Object.keys(this.gcs).length}`;
+ const peakSizeMB = Math.round(this.peakMemory / 1024 / 1024 * 100) / 100;
+ label += ` max=${peakSizeMB}MB`;
+ return label;
+ }
+
+ finalizeGC(gc_data) {
+ this.data_sets.forEach(key => this.finalizeDataSet(gc_data[key]));
+ if ('live' in gc_data) {
+ this.peakMemory = Math.max(this.peakMemory, gc_data['live'].overall);
+ }
+ }
+
+ finalizeDataSet(data_set) {
+ // Create a ranked instance type array that sorts instance types by
+ // memory size (overall).
+ data_set.ranked_instance_types =
+ [...data_set.non_empty_instance_types].sort(function(a, b) {
+ if (data_set.instance_type_data[a].overall >
+ data_set.instance_type_data[b].overall) {
+ return 1;
+ } else if (
+ data_set.instance_type_data[a].overall <
+ data_set.instance_type_data[b].overall) {
+ return -1;
+ }
+ return 0;
+ });
+
+ Object.entries(data_set.instance_type_data).forEach(([name, entry]) => {
+ this.checkHistogram(
+ name, entry, data_set.bucket_sizes, 'histogram', 'overall');
+ this.checkHistogram(
+ name, entry, data_set.bucket_sizes, 'over_allocated_histogram',
+ 'over_allocated');
+ });
+ }
+
+ // Check that a lower bound for histogram memory does not exceed the
+ // overall counter.
+ checkHistogram(type, entry, bucket_sizes, histogram, overallProperty) {
+ let sum = 0;
+ for (let i = 1; i < entry[histogram].length; i++) {
+ sum += entry[histogram][i] * bucket_sizes[i - 1];
+ }
+ const overall = entry[overallProperty];
+ if (sum > overall) {
+ console.error(
+ `${type}: sum('${histogram}') > overall (${sum} > ${overall})`);
+ }
+ }
+}
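
`checkHistogram()` reads bucket `i` as holding objects of at least `bucket_sizes[i - 1]` bytes (bucket 0 has no lower boundary and is skipped), so the weighted sum is a lower bound on the memory the histogram accounts for and must not exceed the `overall` counter. A worked example with invented numbers:

```js
// Lower-bound check as in Isolate.checkHistogram(); all values invented.
const bucket_sizes = [32, 64, 128];
const entry = {overall: 700, histogram: [0, 10, 4, 1]};

let sum = 0;
for (let i = 1; i < entry.histogram.length; i++) {
  sum += entry.histogram[i] * bucket_sizes[i - 1];
}
// 10*32 + 4*64 + 1*128 = 704 > 700, so this entry is inconsistent and
// the model would log an error for it.
console.log(`lower bound ${sum} vs overall ${entry.overall}`);
```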
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.html b/deps/v8/tools/heap-stats/trace-file-reader.html
index 98c2ef0c60..73de98ab03 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.html
+++ b/deps/v8/tools/heap-stats/trace-file-reader.html
@@ -10,17 +10,66 @@ found in the LICENSE file. -->
text-align: center;
border: solid 1px #000000;
border-radius: 5px;
+ cursor: pointer;
+}
+
+.loading #fileReader {
+ cursor: wait;
}
#fileReader > input {
display: none;
}
+
+#loader {
+ display: none;
+}
+
+.loading #loader{
+ display: block;
+ position: fixed;
+ top: 0px;
+ left: 0px;
+ width: 100%;
+ height: 100%;
+ background-color: rgba(255, 255, 255, 0.5);
+}
+
+#spinner {
+ position: absolute;
+ width: 100px;
+ height: 100px;
+ top: 40%;
+ left: 50%;
+ margin-left: -50px;
+ border: 30px solid #000;
+ border-top: 30px solid #36E;
+ border-radius: 50%;
+ animation: spin 1s ease-in-out infinite;
+}
+
+@keyframes spin {
+ 0% {
+ transform: rotate(0deg);
+ }
+ 100% {
+ transform: rotate(360deg);
+ }
+}
</style>
-<div id="fileReader">
- <span id="label">
- Drag and drop a trace file into this area, or click to choose from disk.
- </span>
- <input id="file" type="file" name="file" />
-</div>
+
+<section id="fileReaderSection">
+ <div id="fileReader">
+ <span id="label">
+ Drag and drop a trace file into this area, or click to choose from disk.
+ </span>
+ <input id="file" type="file" name="file" />
+ </div>
+ <div id="loader">
+ <div id="spinner"></div>
+ </div>
+</section>
</template>
-<script type="text/javascript" src="trace-file-reader.js"></script>
+<script type="text/javascript" src="model.js"></script>
+
+<script src="trace-file-reader.js"></script>
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.js b/deps/v8/tools/heap-stats/trace-file-reader.js
index 59825fe514..ef563a43cb 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.js
+++ b/deps/v8/tools/heap-stats/trace-file-reader.js
@@ -23,6 +23,10 @@ class TraceFileReader extends HTMLElement {
return this.shadowRoot.querySelector(id);
}
+ get section() {
+ return this.$('#fileReaderSection');
+ }
+
updateLabel(text) {
this.$('#label').innerText = text;
}
@@ -50,34 +54,42 @@ class TraceFileReader extends HTMLElement {
return;
}
- const result = new FileReader();
- result.onload = (e) => {
- let contents = e.target.result.split('\n');
- const return_data = (e.target.result.includes('V8.GC_Objects_Stats')) ?
- this.createModelFromChromeTraceFile(contents) :
- this.createModelFromV8TraceFile(contents);
- this.updateLabel('Finished loading \'' + file.name + '\'.');
- this.dispatchEvent(new CustomEvent(
- 'change', {bubbles: true, composed: true, detail: return_data}));
- };
- result.readAsText(file);
+ this.section.className = 'loading';
+ const reader = new FileReader();
+
+ if (['application/gzip', 'application/x-gzip'].includes(file.type)) {
+ reader.onload = (e) => {
+ try {
+ const textResult = pako.inflate(e.target.result, {to: 'string'});
+ this.processRawText(file, textResult);
+ this.section.className = 'success';
+ } catch (err) {
+ console.error(err);
+ this.section.className = 'failure';
+ }
+ };
+ reader.readAsArrayBuffer(file);
+ } else {
+ reader.onload = (e) => this.processRawText(file, e.target.result);
+ reader.readAsText(file);
+ }
}
- createOrUpdateEntryIfNeeded(data, keys, entry) {
+ processRawText(file, result) {
+ let contents = result.split('\n');
+ const return_data = (result.includes('V8.GC_Objects_Stats')) ?
+ this.createModelFromChromeTraceFile(contents) :
+ this.createModelFromV8TraceFile(contents);
+ this.extendAndSanitizeModel(return_data);
+ this.updateLabel('Finished loading \'' + file.name + '\'.');
+ this.dispatchEvent(new CustomEvent(
+ 'change', {bubbles: true, composed: true, detail: return_data}));
+ }
+
+ createOrUpdateEntryIfNeeded(data, entry) {
console.assert(entry.isolate, 'entry should have an isolate');
- if (!(entry.isolate in keys)) {
- keys[entry.isolate] = new Set();
- }
if (!(entry.isolate in data)) {
- data[entry.isolate] = {
- non_empty_instance_types: new Set(),
- gcs: {},
- zonetags: [],
- samples: {zone: {}},
- start: null,
- end: null,
- data_sets: new Set()
- };
+ data[entry.isolate] = new Isolate(entry.isolate);
}
const data_object = data[entry.isolate];
if (('id' in entry) && !(entry.id in data_object.gcs)) {
@@ -91,7 +103,7 @@ class TraceFileReader extends HTMLElement {
}
}
- createDatasetIfNeeded(data, keys, entry, data_set) {
+ createDatasetIfNeeded(data, entry, data_set) {
if (!(data_set in data[entry.isolate].gcs[entry.id])) {
data[entry.isolate].gcs[entry.id][data_set] = {
instance_type_data: {},
@@ -102,9 +114,7 @@ class TraceFileReader extends HTMLElement {
}
}
- addInstanceTypeData(
- data, keys, isolate, gc_id, data_set, instance_type, entry) {
- keys[isolate].add(data_set);
+ addInstanceTypeData(data, isolate, gc_id, data_set, instance_type, entry) {
data[isolate].gcs[gc_id][data_set].instance_type_data[instance_type] = {
overall: entry.overall,
count: entry.count,
@@ -121,117 +131,75 @@ class TraceFileReader extends HTMLElement {
}
}
- extendAndSanitizeModel(data, keys) {
+ extendAndSanitizeModel(data) {
const checkNonNegativeProperty = (obj, property) => {
console.assert(obj[property] >= 0, 'negative property', obj, property);
};
- for (const isolate of Object.keys(data)) {
- for (const gc of Object.keys(data[isolate].gcs)) {
- for (const data_set_key of keys[isolate]) {
- const data_set = data[isolate].gcs[gc][data_set_key];
- // 1. Create a ranked instance type array that sorts instance
- // types by memory size (overall).
- data_set.ranked_instance_types =
- [...data_set.non_empty_instance_types].sort(function(a, b) {
- if (data_set.instance_type_data[a].overall >
- data_set.instance_type_data[b].overall) {
- return 1;
- } else if (
- data_set.instance_type_data[a].overall <
- data_set.instance_type_data[b].overall) {
- return -1;
- }
- return 0;
- });
-
- let known_count = 0;
- let known_overall = 0;
- let known_histogram =
- Array(
- data_set.instance_type_data.FIXED_ARRAY_TYPE.histogram.length)
- .fill(0);
- for (const instance_type in data_set.instance_type_data) {
- if (!instance_type.startsWith('*FIXED_ARRAY')) continue;
- const subtype = data_set.instance_type_data[instance_type];
- known_count += subtype.count;
- known_overall += subtype.count;
- for (let i = 0; i < subtype.histogram.length; i++) {
- known_histogram[i] += subtype.histogram[i];
- }
- }
-
- const fixed_array_data = data_set.instance_type_data.FIXED_ARRAY_TYPE;
- const unknown_entry = {
- count: fixed_array_data.count - known_count,
- overall: fixed_array_data.overall - known_overall,
- histogram: fixed_array_data.histogram.map(
- (value, index) => value - known_histogram[index])
- };
-
- // Check for non-negative values.
- checkNonNegativeProperty(unknown_entry, 'count');
- checkNonNegativeProperty(unknown_entry, 'overall');
- for (let i = 0; i < unknown_entry.histogram.length; i++) {
- checkNonNegativeProperty(unknown_entry.histogram, i);
- }
-
- data_set.instance_type_data['*FIXED_ARRAY_UNKNOWN_SUB_TYPE'] =
- unknown_entry;
- data_set.non_empty_instance_types.add(
- '*FIXED_ARRAY_UNKNOWN_SUB_TYPE');
- }
- }
- }
+ Object.values(data).forEach(isolate => isolate.finalize());
}
createModelFromChromeTraceFile(contents) {
- console.log('Processing log as chrome trace file.');
- const data = Object.create(null); // Final data container.
- const keys = Object.create(null); // Collecting 'keys' per isolate.
+ // Trace files support two formats.
+ // {traceEvents: [ data ]}
+ const kObjectTraceFile = {
+ name: 'object',
+ endToken: ']}',
+ getDataArray: o => o.traceEvents
+ };
+ // [ data ]
+ const kArrayTraceFile = {
+ name: 'array',
+ endToken: ']',
+ getDataArray: o => o
+ };
+ const handler =
+ (contents[0][0] === '{') ? kObjectTraceFile : kArrayTraceFile;
+ console.log(`Processing log as chrome trace file (${handler.name}).`);
// Pop last line in log as it might be broken.
contents.pop();
// Remove trailing comma.
contents[contents.length - 1] = contents[contents.length - 1].slice(0, -1);
// Terminate JSON.
- const sanitized_contents = [...contents, ']}'].join('');
+ const sanitized_contents = [...contents, handler.endToken].join('');
+
+ const data = Object.create(null); // Final data container.
try {
const raw_data = JSON.parse(sanitized_contents);
- const objects_stats_data =
- raw_data.traceEvents.filter(e => e.name == 'V8.GC_Objects_Stats');
- objects_stats_data.forEach(trace_data => {
- const actual_data = trace_data.args;
- const data_sets = new Set(Object.keys(actual_data));
- Object.keys(actual_data).forEach(data_set => {
- const string_entry = actual_data[data_set];
- try {
- const entry = JSON.parse(string_entry);
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
- this.createDatasetIfNeeded(data, keys, entry, data_set);
- const isolate = entry.isolate;
- const time = entry.time;
- const gc_id = entry.id;
- data[isolate].gcs[gc_id].time = time;
- data[isolate].gcs[gc_id][data_set].bucket_sizes =
- entry.bucket_sizes;
- for (let [instance_type, value] of Object.entries(
- entry.type_data)) {
- // Trace file format uses markers that do not have actual
- // properties.
- if (!('overall' in value)) continue;
- this.addInstanceTypeData(
- data, keys, isolate, gc_id, data_set, instance_type, value);
- }
- } catch (e) {
- console.log('Unable to parse data set entry', e);
- }
- });
- });
+ const raw_array_data = handler.getDataArray(raw_data);
+ raw_array_data.filter(e => e.name === 'V8.GC_Objects_Stats')
+ .forEach(trace_data => {
+ const actual_data = trace_data.args;
+ const data_sets = new Set(Object.keys(actual_data));
+ Object.keys(actual_data).forEach(data_set => {
+ const string_entry = actual_data[data_set];
+ try {
+ const entry = JSON.parse(string_entry);
+ this.createOrUpdateEntryIfNeeded(data, entry);
+ this.createDatasetIfNeeded(data, entry, data_set);
+ const isolate = entry.isolate;
+ const time = entry.time;
+ const gc_id = entry.id;
+ data[isolate].gcs[gc_id].time = time;
+ data[isolate].gcs[gc_id][data_set].bucket_sizes =
+ entry.bucket_sizes;
+ for (let [instance_type, value] of Object.entries(
+ entry.type_data)) {
+ // Trace file format uses markers that do not have actual
+ // properties.
+ if (!('overall' in value)) continue;
+ this.addInstanceTypeData(
+ data, isolate, gc_id, data_set, instance_type, value);
+ }
+ } catch (e) {
+ console.log('Unable to parse data set entry', e);
+ }
+ });
+ });
} catch (e) {
- console.log('Unable to parse chrome trace file.', e);
+ console.error('Unable to parse chrome trace file.', e);
}
- this.extendAndSanitizeModel(data, keys);
return data;
}
@@ -249,14 +217,12 @@ class TraceFileReader extends HTMLElement {
});
const data = Object.create(null); // Final data container.
- const keys = Object.create(null); // Collecting 'keys' per isolate.
-
for (var entry of contents) {
if (entry === null || entry.type === undefined) {
continue;
}
if (entry.type === 'zone') {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createOrUpdateEntryIfNeeded(data, entry);
const stacktrace = ('stacktrace' in entry) ? entry.stacktrace : [];
data[entry.isolate].samples.zone[entry.time] = {
allocated: entry.allocated,
@@ -265,26 +231,26 @@ class TraceFileReader extends HTMLElement {
};
} else if (
entry.type === 'zonecreation' || entry.type === 'zonedestruction') {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createOrUpdateEntryIfNeeded(data, entry);
data[entry.isolate].zonetags.push(
Object.assign({opening: entry.type === 'zonecreation'}, entry));
} else if (entry.type === 'gc_descriptor') {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createOrUpdateEntryIfNeeded(data, entry);
data[entry.isolate].gcs[entry.id].time = entry.time;
if ('zone' in entry)
data[entry.isolate].gcs[entry.id].malloced = entry.zone;
} else if (entry.type === 'instance_type_data') {
if (entry.id in data[entry.isolate].gcs) {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
- this.createDatasetIfNeeded(data, keys, entry, entry.key);
+ this.createOrUpdateEntryIfNeeded(data, entry);
+ this.createDatasetIfNeeded(data, entry, entry.key);
this.addInstanceTypeData(
- data, keys, entry.isolate, entry.id, entry.key,
+ data, entry.isolate, entry.id, entry.key,
entry.instance_type_name, entry);
}
} else if (entry.type === 'bucket_sizes') {
if (entry.id in data[entry.isolate].gcs) {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
- this.createDatasetIfNeeded(data, keys, entry, entry.key);
+ this.createOrUpdateEntryIfNeeded(data, entry);
+ this.createDatasetIfNeeded(data, entry, entry.key);
data[entry.isolate].gcs[entry.id][entry.key].bucket_sizes =
entry.sizes;
}
@@ -292,7 +258,6 @@ class TraceFileReader extends HTMLElement {
console.log('Unknown entry type: ' + entry.type);
}
}
- this.extendAndSanitizeModel(data, keys);
return data;
}
}
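
The reader now sniffs gzipped traces by the `File`'s MIME type and inflates them in the browser with pako (loaded from cdnjs in index.html), which is why the manual `gunzip` step disappeared from the landing page. A condensed sketch of that branch; `readTrace` is a hypothetical name and `onText` stands in for `processRawText()`:

```js
// Dispatch on the file's MIME type: inflate gzipped traces via pako,
// read plain traces as text directly.
function readTrace(file, onText) {
  const reader = new FileReader();
  if (['application/gzip', 'application/x-gzip'].includes(file.type)) {
    // pako.inflate with {to: 'string'} decompresses straight to text.
    reader.onload = e => onText(pako.inflate(e.target.result, {to: 'string'}));
    reader.readAsArrayBuffer(file);
  } else {
    reader.onload = e => onText(e.target.result);
    reader.readAsText(file);
  }
}
```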
diff --git a/deps/v8/tools/isolate_driver.py b/deps/v8/tools/isolate_driver.py
index a6bcfbf71f..32077e236f 100644
--- a/deps/v8/tools/isolate_driver.py
+++ b/deps/v8/tools/isolate_driver.py
@@ -4,7 +4,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""Adaptor script called through build/isolate.gypi.
+"""Adaptor script called through gni/isolate.gni.
Creates a wrapping .isolate which 'includes' the original one, that can be
consumed by tools/swarming_client/isolate.py. Path variables are determined
diff --git a/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py b/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py
deleted file mode 100644
index 2925213ced..0000000000
--- a/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import re
-import subprocess
-
-FUZZ_PATH = os.path.dirname(os.path.abspath(__file__))
-SHA1_PATH = os.path.join(FUZZ_PATH, 'jsfunfuzz.tar.gz.sha1')
-
-if re.search(r'\bjsfunfuzz=1', os.environ.get('GYP_DEFINES', '')):
- subprocess.check_call([
- 'download_from_google_storage',
- '-b', 'chrome-v8-jsfunfuzz',
- '-u', '--no_resume',
- '-s', SHA1_PATH,
- '--platform=linux*'
- ])
diff --git a/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp b/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp
deleted file mode 100644
index 8938e44538..0000000000
--- a/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'jsfunfuzz_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'jsfunfuzz.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index 9a6600225b..b97ce455c2 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -4,16 +4,12 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""MB - the Meta-Build wrapper around GYP and GN
+"""MB - the Meta-Build wrapper around GN.
-MB is a wrapper script for GYP and GN that can be used to generate build files
+MB is a wrapper script for GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
-# TODO(thomasanderson): Remove this comment. It is added to
-# workaround https://crbug.com/736215 for CL
-# https://codereview.chromium.org/2974603002/
-
from __future__ import print_function
import argparse
@@ -22,6 +18,7 @@ import errno
import json
import os
import pipes
+import platform
import pprint
import re
import shutil
@@ -95,21 +92,17 @@ class MetaBuildWrapper(object):
help='path to config file '
'(default is %(default)s)')
subp.add_argument('-i', '--isolate-map-file', metavar='PATH',
- default=self.default_isolate_map,
help='path to isolate map file '
- '(default is %(default)s)')
+ '(default is %(default)s)',
+ default=[],
+ action='append',
+ dest='isolate_map_files')
subp.add_argument('-g', '--goma-dir',
help='path to goma directory')
- subp.add_argument('--gyp-script', metavar='PATH',
- default=self.PathJoin('build', 'gyp_chromium'),
- help='path to gyp script relative to project root '
- '(default is %(default)s)')
subp.add_argument('--android-version-code',
- help='Sets GN arg android_default_version_code and '
- 'GYP_DEFINE app_manifest_version_code')
+ help='Sets GN arg android_default_version_code')
subp.add_argument('--android-version-name',
- help='Sets GN arg android_default_version_name and '
- 'GYP_DEFINE app_manifest_version_name')
+ help='Sets GN arg android_default_version_name')
subp.add_argument('-n', '--dryrun', action='store_true',
help='Do a dry run (i.e., do nothing, just print '
'the commands that will run)')
@@ -190,7 +183,6 @@ class MetaBuildWrapper(object):
' --test-launcher-retry-limit=0'
'\n'
)
-
AddCommonOptions(subp)
subp.add_argument('-j', '--jobs', dest='jobs', type=int,
help='Number of jobs to pass to ninja')
@@ -202,6 +194,14 @@ class MetaBuildWrapper(object):
' This can be either a regular path or a '
'GN-style source-relative path like '
'//out/Default.'))
+ subp.add_argument('-s', '--swarmed', action='store_true',
+ help='Run under swarming with the default dimensions')
+ subp.add_argument('-d', '--dimension', default=[], action='append', nargs=2,
+ dest='dimensions', metavar='FOO bar',
+ help='dimension to filter on')
+ subp.add_argument('--no-default-dimensions', action='store_false',
+ dest='default_dimensions', default=True,
+ help='Do not automatically add dimensions to the task')
subp.add_argument('target', nargs=1,
help='ninja target to build and run')
subp.add_argument('extra_args', nargs='*',
@@ -217,26 +217,6 @@ class MetaBuildWrapper(object):
help='path to config file (default is %(default)s)')
subp.set_defaults(func=self.CmdValidate)
- subp = subps.add_parser('audit',
- help='Audit the config file to track progress')
- subp.add_argument('-f', '--config-file', metavar='PATH',
- default=self.default_config,
- help='path to config file (default is %(default)s)')
- subp.add_argument('-i', '--internal', action='store_true',
- help='check internal masters also')
- subp.add_argument('-m', '--master', action='append',
- help='master to audit (default is all non-internal '
- 'masters in file)')
- subp.add_argument('-u', '--url-template', action='store',
- default='https://build.chromium.org/p/'
- '{master}/json/builders',
- help='URL scheme for JSON APIs to buildbot '
- '(default: %(default)s) ')
- subp.add_argument('-c', '--check-compile', action='store_true',
- help='check whether tbd and master-only bots actually'
- ' do compiles')
- subp.set_defaults(func=self.CmdAudit)
-
subp = subps.add_parser('gerrit-buildbucket-config',
help='Print buildbucket.config for gerrit '
'(see MB user guide)')
@@ -276,11 +256,7 @@ class MetaBuildWrapper(object):
def CmdAnalyze(self):
vals = self.Lookup()
- self.ClobberIfNeeded(vals)
- if vals['type'] == 'gn':
- return self.RunGNAnalyze(vals)
- else:
- return self.RunGYPAnalyze(vals)
+ return self.RunGNAnalyze(vals)
def CmdExport(self):
self.ReadConfigFile()
@@ -312,11 +288,7 @@ class MetaBuildWrapper(object):
def CmdGen(self):
vals = self.Lookup()
- self.ClobberIfNeeded(vals)
- if vals['type'] == 'gn':
- return self.RunGNGen(vals)
- else:
- return self.RunGYPGen(vals)
+ return self.RunGNGen(vals)
def CmdHelp(self):
if self.args.subcommand:
@@ -328,21 +300,14 @@ class MetaBuildWrapper(object):
vals = self.GetConfig()
if not vals:
return 1
-
- if vals['type'] == 'gn':
- return self.RunGNIsolate()
- else:
- return self.Build('%s_run' % self.args.target[0])
+ return self.RunGNIsolate()
def CmdLookup(self):
vals = self.Lookup()
- if vals['type'] == 'gn':
- cmd = self.GNCmd('gen', '_path_')
- gn_args = self.GNArgs(vals)
- self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
- env = None
- else:
- cmd, env = self.GYPCmd('_path_', vals)
+ cmd = self.GNCmd('gen', '_path_')
+ gn_args = self.GNArgs(vals)
+ self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
+ env = None
self.PrintCmd(cmd, env)
return 0
@@ -355,32 +320,86 @@ class MetaBuildWrapper(object):
build_dir = self.args.path[0]
target = self.args.target[0]
- if vals['type'] == 'gn':
- if self.args.build:
- ret = self.Build(target)
- if ret:
- return ret
- ret = self.RunGNIsolate()
+ if self.args.build:
+ ret = self.Build(target)
if ret:
return ret
+ ret = self.RunGNIsolate()
+ if ret:
+ return ret
+
+ if self.args.swarmed:
+ return self._RunUnderSwarming(build_dir, target)
else:
- ret = self.Build('%s_run' % target)
- if ret:
- return ret
+ return self._RunLocallyIsolated(build_dir, target)
+
+ def _RunUnderSwarming(self, build_dir, target):
+ # TODO(dpranke): Look up the information for the target in
+ # the //testing/buildbot.json file, if possible, so that we
+ # can determine the isolate target, command line, and additional
+ # swarming parameters.
+ #
+ # TODO(dpranke): Also, add support for sharding and merging results.
+ dimensions = []
+ for k, v in self._DefaultDimensions() + self.args.dimensions:
+ dimensions += ['-d', k, v]
cmd = [
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
+ 'archive',
+ '-s',
+ self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
+ '-I', 'isolateserver.appspot.com',
+ ]
+ ret, out, _ = self.Run(cmd, force_verbose=False)
+ if ret:
+ return ret
+
+ isolated_hash = out.splitlines()[0].split()[0]
+ cmd = [
+ self.executable,
+ self.PathJoin('tools', 'swarming_client', 'swarming.py'),
+ 'run',
+ '-s', isolated_hash,
+ '-I', 'isolateserver.appspot.com',
+ '-S', 'chromium-swarm.appspot.com',
+ ] + dimensions
+ if self.args.extra_args:
+ cmd += ['--'] + self.args.extra_args
+ ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+ return ret
+
+ def _RunLocallyIsolated(self, build_dir, target):
+ cmd = [
+ self.executable,
+ self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'run',
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
- ]
+ ]
if self.args.extra_args:
- cmd += ['--'] + self.args.extra_args
+ cmd += ['--'] + self.args.extra_args
+ ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+ return ret
- ret, _, _ = self.Run(cmd, force_verbose=False, buffer_output=False)
+ def _DefaultDimensions(self):
+ if not self.args.default_dimensions:
+ return []
+
+ # This code is naive and just picks reasonable defaults per platform.
+ if self.platform == 'darwin':
+ os_dim = ('os', 'Mac-10.12')
+ elif self.platform.startswith('linux'):
+ os_dim = ('os', 'Ubuntu-14.04')
+ elif self.platform == 'win32':
+ os_dim = ('os', 'Windows-10-14393')
+ else:
+ raise MBErr('unrecognized platform string "%s"' % self.platform)
- return ret
+ return [('pool', 'Chrome'),
+ ('cpu', 'x86-64'),
+ os_dim]
def CmdBuildbucket(self):
self.ReadConfigFile()
@@ -462,154 +481,26 @@ class MetaBuildWrapper(object):
self.Print('mb config file %s looks ok.' % self.args.config_file)
return 0
- def CmdAudit(self):
- """Track the progress of the GYP->GN migration on the bots."""
-
- # First, make sure the config file is okay, but don't print anything
- # if it is (it will throw an error if it isn't).
- self.CmdValidate(print_ok=False)
-
- stats = OrderedDict()
- STAT_MASTER_ONLY = 'Master only'
- STAT_CONFIG_ONLY = 'Config only'
- STAT_TBD = 'Still TBD'
- STAT_GYP = 'Still GYP'
- STAT_DONE = 'Done (on GN)'
- stats[STAT_MASTER_ONLY] = 0
- stats[STAT_CONFIG_ONLY] = 0
- stats[STAT_TBD] = 0
- stats[STAT_GYP] = 0
- stats[STAT_DONE] = 0
-
- def PrintBuilders(heading, builders, notes):
- stats.setdefault(heading, 0)
- stats[heading] += len(builders)
- if builders:
- self.Print(' %s:' % heading)
- for builder in sorted(builders):
- self.Print(' %s%s' % (builder, notes[builder]))
-
- self.ReadConfigFile()
-
- masters = self.args.master or self.masters
- for master in sorted(masters):
- url = self.args.url_template.replace('{master}', master)
-
- self.Print('Auditing %s' % master)
-
- MASTERS_TO_SKIP = (
- 'client.skia',
- 'client.v8.fyi',
- 'tryserver.v8',
- )
- if master in MASTERS_TO_SKIP:
- # Skip these bots because converting them is the responsibility of
- # those teams and out of scope for the Chromium migration to GN.
- self.Print(' Skipped (out of scope)')
- self.Print('')
- continue
-
- INTERNAL_MASTERS = ('official.desktop', 'official.desktop.continuous',
- 'internal.client.kitchensync')
- if master in INTERNAL_MASTERS and not self.args.internal:
- # Skip these because the servers aren't accessible by default ...
- self.Print(' Skipped (internal)')
- self.Print('')
- continue
-
- try:
- # Fetch the /builders contents from the buildbot master. The
- # keys of the dict are the builder names themselves.
- json_contents = self.Fetch(url)
- d = json.loads(json_contents)
- except Exception as e:
- self.Print(str(e))
- return 1
-
- config_builders = set(self.masters[master])
- master_builders = set(d.keys())
- both = master_builders & config_builders
- master_only = master_builders - config_builders
- config_only = config_builders - master_builders
- tbd = set()
- gyp = set()
- done = set()
- notes = {builder: '' for builder in config_builders | master_builders}
-
- for builder in both:
- config = self.masters[master][builder]
- if config == 'tbd':
- tbd.add(builder)
- elif isinstance(config, dict):
- vals = self.FlattenConfig(config.values()[0])
- if vals['type'] == 'gyp':
- gyp.add(builder)
- else:
- done.add(builder)
- elif config.startswith('//'):
- done.add(builder)
- else:
- vals = self.FlattenConfig(config)
- if vals['type'] == 'gyp':
- gyp.add(builder)
- else:
- done.add(builder)
-
- if self.args.check_compile and (tbd or master_only):
- either = tbd | master_only
- for builder in either:
- notes[builder] = ' (' + self.CheckCompile(master, builder) +')'
-
- if master_only or config_only or tbd or gyp:
- PrintBuilders(STAT_MASTER_ONLY, master_only, notes)
- PrintBuilders(STAT_CONFIG_ONLY, config_only, notes)
- PrintBuilders(STAT_TBD, tbd, notes)
- PrintBuilders(STAT_GYP, gyp, notes)
- else:
- self.Print(' All GN!')
-
- stats[STAT_DONE] += len(done)
-
- self.Print('')
-
- fmt = '{:<27} {:>4}'
- self.Print(fmt.format('Totals', str(sum(int(v) for v in stats.values()))))
- self.Print(fmt.format('-' * 27, '----'))
- for stat, count in stats.items():
- self.Print(fmt.format(stat, str(count)))
-
- return 0
-
def GetConfig(self):
build_dir = self.args.path[0]
vals = self.DefaultVals()
if self.args.builder or self.args.master or self.args.config:
vals = self.Lookup()
- if vals['type'] == 'gn':
- # Re-run gn gen in order to ensure the config is consistent with the
- # build dir.
- self.RunGNGen(vals)
+ # Re-run gn gen in order to ensure the config is consistent with the
+ # build dir.
+ self.RunGNGen(vals)
return vals
- mb_type_path = self.PathJoin(self.ToAbsPath(build_dir), 'mb_type')
- if not self.Exists(mb_type_path):
- toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
- 'toolchain.ninja')
- if not self.Exists(toolchain_path):
- self.Print('Must either specify a path to an existing GN build dir '
- 'or pass in a -m/-b pair or a -c flag to specify the '
- 'configuration')
- return {}
- else:
- mb_type = 'gn'
- else:
- mb_type = self.ReadFile(mb_type_path).strip()
-
- if mb_type == 'gn':
- vals['gn_args'] = self.GNArgsFromDir(build_dir)
- vals['type'] = mb_type
+ toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
+ 'toolchain.ninja')
+ if not self.Exists(toolchain_path):
+ self.Print('Must either specify a path to an existing GN build dir '
+ 'or pass in a -m/-b pair or a -c flag to specify the '
+ 'configuration')
+ return {}
+ vals['gn_args'] = self.GNArgsFromDir(build_dir)
return vals
def GNArgsFromDir(self, build_dir):
@@ -641,14 +532,6 @@ class MetaBuildWrapper(object):
raise MBErr('Config "%s" not found in %s' %
(config, self.args.config_file))
vals = self.FlattenConfig(config)
-
- # Do some basic sanity checking on the config so that we
- # don't have to do this in every caller.
- if 'type' not in vals:
- vals['type'] = 'gn'
- assert vals['type'] in ('gn', 'gyp'), (
- 'Unknown meta-build type "%s"' % vals['gn_args'])
-
return vals
def ReadIOSBotConfig(self):
@@ -660,17 +543,10 @@ class MetaBuildWrapper(object):
return {}
contents = json.loads(self.ReadFile(path))
- gyp_vals = contents.get('GYP_DEFINES', {})
- if isinstance(gyp_vals, dict):
- gyp_defines = ' '.join('%s=%s' % (k, v) for k, v in gyp_vals.items())
- else:
- gyp_defines = ' '.join(gyp_vals)
gn_args = ' '.join(contents.get('gn_args', []))
vals = self.DefaultVals()
vals['gn_args'] = gn_args
- vals['gyp_defines'] = gyp_defines
- vals['type'] = contents.get('mb_type', 'gn')
return vals
def ReadConfigFile(self):
@@ -689,14 +565,26 @@ class MetaBuildWrapper(object):
self.mixins = contents['mixins']
def ReadIsolateMap(self):
- if not self.Exists(self.args.isolate_map_file):
- raise MBErr('isolate map file not found at %s' %
- self.args.isolate_map_file)
- try:
- return ast.literal_eval(self.ReadFile(self.args.isolate_map_file))
- except SyntaxError as e:
- raise MBErr('Failed to parse isolate map file "%s": %s' %
- (self.args.isolate_map_file, e))
+ if not self.args.isolate_map_files:
+ self.args.isolate_map_files = [self.default_isolate_map]
+
+ for f in self.args.isolate_map_files:
+ if not self.Exists(f):
+ raise MBErr('isolate map file not found at %s' % f)
+ isolate_maps = {}
+ for isolate_map in self.args.isolate_map_files:
+ try:
+ isolate_map = ast.literal_eval(self.ReadFile(isolate_map))
+ duplicates = set(isolate_map).intersection(isolate_maps)
+ if duplicates:
+ raise MBErr(
+ 'Duplicate targets in isolate map files: %s.' %
+ ', '.join(duplicates))
+ isolate_maps.update(isolate_map)
+ except SyntaxError as e:
+ raise MBErr(
+ 'Failed to parse isolate map file "%s": %s' % (isolate_map, e))
+ return isolate_maps
def ConfigFromArgs(self):
if self.args.config:
@@ -747,9 +635,6 @@ class MetaBuildWrapper(object):
'args_file': '',
'cros_passthrough': False,
'gn_args': '',
- 'gyp_defines': '',
- 'gyp_crosscompile': False,
- 'type': 'gn',
}
def FlattenMixins(self, mixins, vals, visited):
@@ -773,50 +658,11 @@ class MetaBuildWrapper(object):
vals['gn_args'] += ' ' + mixin_vals['gn_args']
else:
vals['gn_args'] = mixin_vals['gn_args']
- if 'gyp_crosscompile' in mixin_vals:
- vals['gyp_crosscompile'] = mixin_vals['gyp_crosscompile']
- if 'gyp_defines' in mixin_vals:
- if vals['gyp_defines']:
- vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines']
- else:
- vals['gyp_defines'] = mixin_vals['gyp_defines']
- if 'type' in mixin_vals:
- vals['type'] = mixin_vals['type']
if 'mixins' in mixin_vals:
self.FlattenMixins(mixin_vals['mixins'], vals, visited)
return vals
- def ClobberIfNeeded(self, vals):
- path = self.args.path[0]
- build_dir = self.ToAbsPath(path)
- mb_type_path = self.PathJoin(build_dir, 'mb_type')
- needs_clobber = False
- new_mb_type = vals['type']
- if self.Exists(build_dir):
- if self.Exists(mb_type_path):
- old_mb_type = self.ReadFile(mb_type_path)
- if old_mb_type != new_mb_type:
- self.Print("Build type mismatch: was %s, will be %s, clobbering %s" %
- (old_mb_type, new_mb_type, path))
- needs_clobber = True
- else:
- # There is no 'mb_type' file in the build directory, so this probably
- # means that the prior build(s) were not done through mb, and we
- # have no idea if this was a GYP build or a GN build. Clobber it
- # to be safe.
- self.Print("%s/mb_type missing, clobbering to be safe" % path)
- needs_clobber = True
-
- if self.args.dryrun:
- return
-
- if needs_clobber:
- self.RemoveDirectory(build_dir)
-
- self.MaybeMakeDirectory(build_dir)
- self.WriteFile(mb_type_path, new_mb_type)
-
def RunGNGen(self, vals, compute_grit_inputs_for_analyze=False):
build_dir = self.args.path[0]
@@ -861,6 +707,7 @@ class MetaBuildWrapper(object):
return ret
android = 'target_os="android"' in vals['gn_args']
+ fuchsia = 'target_os="fuchsia"' in vals['gn_args']
for target in swarming_targets:
if android:
# Android targets may be either android_apk or executable. The former
@@ -870,6 +717,11 @@ class MetaBuildWrapper(object):
runtime_deps_targets = [
target + '.runtime_deps',
'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
+ elif fuchsia:
+ # Only emit a runtime deps file for the group() target on Fuchsia.
+ label = isolate_map[target]['label']
+ runtime_deps_targets = [
+ 'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
elif (isolate_map[target]['type'] == 'script' or
isolate_map[target].get('label_type') == 'group'):
# For script targets, the build target is usually a group,
@@ -1023,38 +875,6 @@ class MetaBuildWrapper(object):
gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
return gn_args
- def RunGYPGen(self, vals):
- path = self.args.path[0]
-
- output_dir = self.ParseGYPConfigPath(path)
- cmd, env = self.GYPCmd(output_dir, vals)
- ret, _, _ = self.Run(cmd, env=env)
- return ret
-
- def RunGYPAnalyze(self, vals):
- output_dir = self.ParseGYPConfigPath(self.args.path[0])
- if self.args.verbose:
- inp = self.ReadInputJSON(['files', 'test_targets',
- 'additional_compile_targets'])
- self.Print()
- self.Print('analyze input:')
- self.PrintJSON(inp)
- self.Print()
-
- cmd, env = self.GYPCmd(output_dir, vals)
- cmd.extend(['-f', 'analyzer',
- '-G', 'config_path=%s' % self.args.input_path[0],
- '-G', 'analyzer_output_path=%s' % self.args.output_path[0]])
- ret, _, _ = self.Run(cmd, env=env)
- if not ret and self.args.verbose:
- outp = json.loads(self.ReadFile(self.args.output_path[0]))
- self.Print()
- self.Print('analyze output:')
- self.PrintJSON(outp)
- self.Print()
-
- return ret
-
def ToAbsPath(self, build_path, *comps):
return self.PathJoin(self.chromium_src_dir,
self.ToSrcRelPath(build_path),
@@ -1066,86 +886,6 @@ class MetaBuildWrapper(object):
return path[2:].replace('/', self.sep)
return self.RelPath(path, self.chromium_src_dir)
- def ParseGYPConfigPath(self, path):
- rpath = self.ToSrcRelPath(path)
- output_dir, _, _ = rpath.rpartition(self.sep)
- return output_dir
-
- def GYPCmd(self, output_dir, vals):
- if vals['cros_passthrough']:
- if not 'GYP_DEFINES' in os.environ:
- raise MBErr('MB is expecting GYP_DEFINES to be in the environment')
- gyp_defines = os.environ['GYP_DEFINES']
- if not 'chromeos=1' in gyp_defines:
- raise MBErr('GYP_DEFINES is missing chromeos=1: (GYP_DEFINES=%s)' %
- gyp_defines)
- else:
- gyp_defines = vals['gyp_defines']
-
- goma_dir = self.args.goma_dir
-
- # GYP uses shlex.split() to split the gyp defines into separate arguments,
- # so we can support backslashes and spaces in arguments by quoting
- # them, even on Windows, where this normally wouldn't work.
- if goma_dir and ('\\' in goma_dir or ' ' in goma_dir):
- goma_dir = "'%s'" % goma_dir
-
- if goma_dir:
- gyp_defines += ' gomadir=%s' % goma_dir
-
- android_version_code = self.args.android_version_code
- if android_version_code:
- gyp_defines += ' app_manifest_version_code=%s' % android_version_code
-
- android_version_name = self.args.android_version_name
- if android_version_name:
- gyp_defines += ' app_manifest_version_name=%s' % android_version_name
-
- cmd = [
- self.executable,
- self.args.gyp_script,
- '-G',
- 'output_dir=' + output_dir,
- ]
-
- # Ensure that we have an environment that only contains
- # the exact values of the GYP variables we need.
- env = os.environ.copy()
-
- # This is a terrible hack to work around the fact that
- # //tools/clang/scripts/update.py is invoked by GYP and GN but
- # currently relies on an environment variable to figure out
- # what revision to embed in the command line #defines.
- # For GN, we've made this work via a gn arg that will cause update.py
- # to get an additional command line arg, but getting that to work
- # via GYP_DEFINES has proven difficult, so we rewrite the GYP_DEFINES
- # to get rid of the arg and add the old var in, instead.
- # See crbug.com/582737 for more on this. This can hopefully all
- # go away with GYP.
- m = re.search('llvm_force_head_revision=1\s*', gyp_defines)
- if m:
- env['LLVM_FORCE_HEAD_REVISION'] = '1'
- gyp_defines = gyp_defines.replace(m.group(0), '')
-
- # This is another terrible hack to work around the fact that
- # GYP sets the link concurrency to use via the GYP_LINK_CONCURRENCY
- # environment variable, and not via a proper GYP_DEFINE. See
- # crbug.com/611491 for more on this.
- m = re.search('gyp_link_concurrency=(\d+)(\s*)', gyp_defines)
- if m:
- env['GYP_LINK_CONCURRENCY'] = m.group(1)
- gyp_defines = gyp_defines.replace(m.group(0), '')
-
- env['GYP_GENERATORS'] = 'ninja'
- if 'GYP_CHROMIUM_NO_ACTION' in env:
- del env['GYP_CHROMIUM_NO_ACTION']
- if 'GYP_CROSSCOMPILE' in env:
- del env['GYP_CROSSCOMPILE']
- env['GYP_DEFINES'] = gyp_defines
- if vals['gyp_crosscompile']:
- env['GYP_CROSSCOMPILE'] = '1'
- return cmd, env
-
def RunGNAnalyze(self, vals):
# Analyze runs before 'gn gen' now, so we need to run gn gen
# in order to ensure that we have a build directory.
@@ -1347,9 +1087,6 @@ class MetaBuildWrapper(object):
if env and var in env:
self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
- print_env('GYP_CROSSCOMPILE')
- print_env('GYP_DEFINES')
- print_env('GYP_LINK_CONCURRENCY')
print_env('LLVM_FORCE_HEAD_REVISION')
if cmd[0] == self.executable:
@@ -1486,7 +1223,6 @@ def QuoteForSet(arg):
def QuoteForCmd(arg):
# First, escape the arg so that CommandLineToArgvW will parse it properly.
- # From //tools/gyp/pylib/gyp/msvs_emulation.py:23.
if arg == '' or ' ' in arg or '"' in arg:
quote_re = re.compile(r'(\\*)"')
arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
index 15763750da..0413457eab 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -65,8 +65,6 @@ class FakeMBW(mb.MetaBuildWrapper):
self.files[path] = contents
def Call(self, cmd, env=None, buffer_output=True):
- if env:
- self.cross_compile = env.get('GYP_CROSSCOMPILE')
self.calls.append(cmd)
if self.cmds:
return self.cmds.pop(0)
@@ -112,13 +110,10 @@ TEST_CONFIG = """\
'masters': {
'chromium': {},
'fake_master': {
- 'fake_builder': 'gyp_rel_bot',
- 'fake_gn_builder': 'gn_rel_bot',
- 'fake_gyp_crosscompile_builder': 'gyp_crosscompile',
- 'fake_gn_debug_builder': 'gn_debug_goma',
- 'fake_gyp_builder': 'gyp_debug',
- 'fake_gn_args_bot': '//build/args/bots/fake_master/fake_gn_args_bot.gn',
- 'fake_multi_phase': { 'phase_1': 'gn_phase_1', 'phase_2': 'gn_phase_2'},
+ 'fake_builder': 'rel_bot',
+ 'fake_debug_builder': 'debug_goma',
+ 'fake_args_bot': '//build/args/bots/fake_master/fake_args_bot.gn',
+ 'fake_multi_phase': { 'phase_1': 'phase_1', 'phase_2': 'phase_2'},
'fake_args_file': 'args_file_goma',
'fake_args_file_twice': 'args_file_twice',
},
@@ -126,38 +121,26 @@ TEST_CONFIG = """\
'configs': {
'args_file_goma': ['args_file', 'goma'],
'args_file_twice': ['args_file', 'args_file'],
- 'gyp_rel_bot': ['gyp', 'rel', 'goma'],
- 'gn_debug_goma': ['gn', 'debug', 'goma'],
- 'gyp_debug': ['gyp', 'debug', 'fake_feature1'],
- 'gn_rel_bot': ['gn', 'rel', 'goma'],
- 'gyp_crosscompile': ['gyp', 'crosscompile'],
- 'gn_phase_1': ['gn', 'phase_1'],
- 'gn_phase_2': ['gn', 'phase_2'],
+ 'rel_bot': ['rel', 'goma', 'fake_feature1'],
+ 'debug_goma': ['debug', 'goma'],
+ 'phase_1': ['phase_1'],
+ 'phase_2': ['phase_2'],
},
'mixins': {
- 'crosscompile': {
- 'gyp_crosscompile': True,
- },
'fake_feature1': {
'gn_args': 'enable_doom_melon=true',
- 'gyp_defines': 'doom_melon=1',
},
- 'gyp': {'type': 'gyp'},
- 'gn': {'type': 'gn'},
'goma': {
'gn_args': 'use_goma=true',
- 'gyp_defines': 'goma=1',
},
'args_file': {
'args_file': '//build/args/fake.gn',
},
'phase_1': {
'gn_args': 'phase=1',
- 'gyp_args': 'phase=1',
},
'phase_2': {
'gn_args': 'phase=2',
- 'gyp_args': 'phase=2',
},
'rel': {
'gn_args': 'is_debug=false',
@@ -169,28 +152,6 @@ TEST_CONFIG = """\
}
"""
-GYP_HACKS_CONFIG = """\
-{
- 'masters': {
- 'chromium': {},
- 'fake_master': {
- 'fake_builder': 'fake_config',
- },
- },
- 'configs': {
- 'fake_config': ['fake_mixin'],
- },
- 'mixins': {
- 'fake_mixin': {
- 'type': 'gyp',
- 'gn_args': '',
- 'gyp_defines':
- ('foo=bar llvm_force_head_revision=1 '
- 'gyp_link_concurrency=1 baz=1'),
- },
- },
-}
-"""
TRYSERVER_CONFIG = """\
{
@@ -229,7 +190,7 @@ class UnitTest(unittest.TestCase):
},
}''')
mbw.files.setdefault(
- mbw.ToAbsPath('//build/args/bots/fake_master/fake_gn_args_bot.gn'),
+ mbw.ToAbsPath('//build/args/bots/fake_master/fake_args_bot.gn'),
'is_debug = false\n')
if files:
for path, contents in files.items():
@@ -249,37 +210,6 @@ class UnitTest(unittest.TestCase):
self.assertEqual(mbw.err, err)
return mbw
- def test_clobber(self):
- files = {
- '/fake_src/out/Debug': None,
- '/fake_src/out/Debug/mb_type': None,
- }
- mbw = self.fake_mbw(files)
-
- # The first time we run this, the build dir doesn't exist, so no clobber.
- self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
- self.assertEqual(mbw.rmdirs, [])
- self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
-
- # The second time we run this, the build dir exists and matches, so no
- # clobber.
- self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
- self.assertEqual(mbw.rmdirs, [])
- self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
-
- # Now we switch build types; this should result in a clobber.
- self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
- self.assertEqual(mbw.rmdirs, ['/fake_src/out/Debug'])
- self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
-
- # Now we delete mb_type; this checks the case where the build dir
- # exists but wasn't populated by mb; this should also result in a clobber.
- del mbw.files['/fake_src/out/Debug/mb_type']
- self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
- self.assertEqual(mbw.rmdirs,
- ['/fake_src/out/Debug', '/fake_src/out/Debug'])
- self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
-
def test_analyze(self):
files = {'/tmp/in.json': '''{\
"files": ["foo/foo_unittest.cc"],
@@ -295,7 +225,7 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
self.assertEqual(out, {
@@ -319,7 +249,7 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
@@ -342,7 +272,7 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
@@ -369,7 +299,7 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
@@ -379,9 +309,9 @@ class UnitTest(unittest.TestCase):
# test_targets and additional_compile_targets.
self.assertEqual(['all', 'foo_unittests'], out['compile_targets'])
- def test_gn_gen(self):
+ def test_gen(self):
mbw = self.fake_mbw()
- self.check(['gen', '-c', 'gn_debug_goma', '//out/Default', '-g', '/goma'],
+ self.check(['gen', '-c', 'debug_goma', '//out/Default', '-g', '/goma'],
mbw=mbw, ret=0)
self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
('goma_dir = "/goma"\n'
@@ -394,7 +324,7 @@ class UnitTest(unittest.TestCase):
mbw.out)
mbw = self.fake_mbw(win32=True)
- self.check(['gen', '-c', 'gn_debug_goma', '-g', 'c:\\goma', '//out/Debug'],
+ self.check(['gen', '-c', 'debug_goma', '-g', 'c:\\goma', '//out/Debug'],
mbw=mbw, ret=0)
self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
('goma_dir = "c:\\\\goma"\n'
@@ -404,14 +334,14 @@ class UnitTest(unittest.TestCase):
'--check\n', mbw.out)
mbw = self.fake_mbw()
- self.check(['gen', '-m', 'fake_master', '-b', 'fake_gn_args_bot',
+ self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_bot',
'//out/Debug'],
mbw=mbw, ret=0)
self.assertEqual(
mbw.files['/fake_src/out/Debug/args.gn'],
- 'import("//build/args/bots/fake_master/fake_gn_args_bot.gn")\n')
+ 'import("//build/args/bots/fake_master/fake_args_bot.gn")\n')
- def test_gn_gen_args_file_mixins(self):
+ def test_gen_args_file_mixins(self):
mbw = self.fake_mbw()
self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file',
'//out/Debug'], mbw=mbw, ret=0)
@@ -425,14 +355,14 @@ class UnitTest(unittest.TestCase):
self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file_twice',
'//out/Debug'], mbw=mbw, ret=1)
- def test_gn_gen_fails(self):
+ def test_gen_fails(self):
mbw = self.fake_mbw()
mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
- self.check(['gen', '-c', 'gn_debug_goma', '//out/Default'], mbw=mbw, ret=1)
+ self.check(['gen', '-c', 'debug_goma', '//out/Default'], mbw=mbw, ret=1)
# TODO(machenbach): Comment back in after swarming file parameter is used.
"""
- def test_gn_gen_swarming(self):
+ def test_gen_swarming(self):
files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
@@ -448,7 +378,7 @@ class UnitTest(unittest.TestCase):
}
mbw = self.fake_mbw(files)
self.check(['gen',
- '-c', 'gn_debug_goma',
+ '-c', 'debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'//out/Default'], mbw=mbw, ret=0)
self.assertIn('/fake_src/out/Default/base_unittests.isolate',
@@ -456,7 +386,7 @@ class UnitTest(unittest.TestCase):
self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
mbw.files)
- def test_gn_gen_swarming_script(self):
+ def test_gen_swarming_script(self):
files = {
'/tmp/swarming_targets': 'cc_perftests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
@@ -473,7 +403,7 @@ class UnitTest(unittest.TestCase):
}
mbw = self.fake_mbw(files=files, win32=True)
self.check(['gen',
- '-c', 'gn_debug_goma',
+ '-c', 'debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl',
@@ -482,9 +412,77 @@ class UnitTest(unittest.TestCase):
mbw.files)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
mbw.files)
- """ # pylint: disable=pointless-string-statement
- def test_gn_isolate(self):
+
+ def test_multiple_isolate_maps(self):
+ files = {
+ '/tmp/swarming_targets': 'cc_perftests\n',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+ "{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
+ "{'cc_perftests2': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ 'c:\\fake_src\\out\\Default\\cc_perftests.exe.runtime_deps': (
+ "cc_perftests\n"
+ ),
+ }
+ mbw = self.fake_mbw(files=files, win32=True)
+ self.check(['gen',
+ '-c', 'debug_goma',
+ '--swarming-targets-file', '/tmp/swarming_targets',
+ '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl',
+ '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
+ '//out/Default'], mbw=mbw, ret=0)
+ self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
+ mbw.files)
+ self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
+ mbw.files)
+
+
+ def test_duplicate_isolate_maps(self):
+ files = {
+ '/tmp/swarming_targets': 'cc_perftests\n',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+ "{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
+ "{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ 'c:\\fake_src\\out\\Default\\cc_perftests.exe.runtime_deps': (
+ "cc_perftests\n"
+ ),
+ }
+ mbw = self.fake_mbw(files=files, win32=True)
+ # Check that passing duplicate targets into mb fails.
+ self.check(['gen',
+ '-c', 'debug_goma',
+ '--swarming-targets-file', '/tmp/swarming_targets',
+ '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl',
+ '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
+ '//out/Default'], mbw=mbw, ret=1)
+
+ def test_isolate(self):
files = {
'/fake_src/out/Default/toolchain.ninja': "",
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
@@ -498,7 +496,7 @@ class UnitTest(unittest.TestCase):
"base_unittests\n"
),
}
- self.check(['isolate', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['isolate', '-c', 'debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
# test running isolate on an existing build_dir
@@ -506,11 +504,10 @@ class UnitTest(unittest.TestCase):
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
- files['/fake_src/out/Default/mb_type'] = 'gn\n'
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
- def test_gn_run(self):
+ def test_run(self):
files = {
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
@@ -523,55 +520,51 @@ class UnitTest(unittest.TestCase):
"base_unittests\n"
),
}
- self.check(['run', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['run', '-c', 'debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
- def test_gn_lookup(self):
- self.check(['lookup', '-c', 'gn_debug_goma'], ret=0)
+ def test_run_swarmed(self):
+ files = {
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+ "{'base_unittests': {"
+ " 'label': '//base:base_unittests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ '/fake_src/out/Default/base_unittests.runtime_deps': (
+ "base_unittests\n"
+ ),
+ }
+
+ def run_stub(cmd, **_kwargs):
+ if 'isolate.py' in cmd[1]:
+ return 0, 'fake_hash base_unittests', ''
+ else:
+ return 0, '', ''
+
+ mbw = self.fake_mbw(files=files)
+ mbw.Run = run_stub
+ self.check(['run', '-s', '-c', 'debug_goma', '//out/Default',
+ 'base_unittests'], mbw=mbw, ret=0)
+ self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7',
+ '//out/Default', 'base_unittests'], mbw=mbw, ret=0)
+ """ # pylint: disable=pointless-string-statement
- def test_gn_lookup_goma_dir_expansion(self):
- self.check(['lookup', '-c', 'gn_rel_bot', '-g', '/foo'], ret=0,
+ def test_lookup(self):
+ self.check(['lookup', '-c', 'debug_goma'], ret=0)
+
+ def test_lookup_goma_dir_expansion(self):
+ self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0,
out=('\n'
'Writing """\\\n'
+ 'enable_doom_melon = true\n'
'goma_dir = "/foo"\n'
'is_debug = false\n'
'use_goma = true\n'
'""" to _path_/args.gn.\n\n'
'/fake_src/buildtools/linux64/gn gen _path_\n'))
- def test_gyp_analyze(self):
- mbw = self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release',
- '/tmp/in.json', '/tmp/out.json'], ret=0)
- self.assertIn('analyzer', mbw.calls[0])
-
- def test_gyp_crosscompile(self):
- mbw = self.fake_mbw()
- self.check(['gen', '-c', 'gyp_crosscompile', '//out/Release'],
- mbw=mbw, ret=0)
- self.assertTrue(mbw.cross_compile)
-
- def test_gyp_gen(self):
- self.check(['gen', '-c', 'gyp_rel_bot', '-g', '/goma', '//out/Release'],
- ret=0,
- out=("GYP_DEFINES='goma=1 gomadir=/goma'\n"
- "python build/gyp_chromium -G output_dir=out\n"))
-
- mbw = self.fake_mbw(win32=True)
- self.check(['gen', '-c', 'gyp_rel_bot', '-g', 'c:\\goma', '//out/Release'],
- mbw=mbw, ret=0,
- out=("set GYP_DEFINES=goma=1 gomadir='c:\\goma'\n"
- "python build\\gyp_chromium -G output_dir=out\n"))
-
- def test_gyp_gen_fails(self):
- mbw = self.fake_mbw()
- mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
- self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], mbw=mbw, ret=1)
-
- def test_gyp_lookup_goma_dir_expansion(self):
- self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0,
- out=("GYP_DEFINES='goma=1 gomadir=/foo'\n"
- "python build/gyp_chromium -G output_dir=_path_\n"))
-
def test_help(self):
orig_stdout = sys.stdout
try:
@@ -589,7 +582,7 @@ class UnitTest(unittest.TestCase):
self.assertIn('Must specify a build --phase', mbw.out)
# Check that passing a --phase to a single-phase builder fails.
- mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_gn_builder',
+ mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_builder',
'--phase', 'phase_1'], ret=1)
self.assertIn('Must not specify a build --phase', mbw.out)
@@ -611,16 +604,6 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw()
self.check(['validate'], mbw=mbw, ret=0)
- def test_gyp_env_hacks(self):
- mbw = self.fake_mbw()
- mbw.files[mbw.default_config] = GYP_HACKS_CONFIG
- self.check(['lookup', '-c', 'fake_config'], mbw=mbw,
- ret=0,
- out=("GYP_DEFINES='foo=bar baz=1'\n"
- "GYP_LINK_CONCURRENCY=1\n"
- "LLVM_FORCE_HEAD_REVISION=1\n"
- "python build/gyp_chromium -G output_dir=_path_\n"))
-
def test_buildbucket(self):
mbw = self.fake_mbw()
mbw.files[mbw.default_config] = TRYSERVER_CONFIG
diff --git a/deps/v8/tools/mingw-generate-makefiles.sh b/deps/v8/tools/mingw-generate-makefiles.sh
deleted file mode 100755
index 67715fc15b..0000000000
--- a/deps/v8/tools/mingw-generate-makefiles.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/sh
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Monkey-patch GYP.
-cat > tools/gyp/gyp.mingw << EOF
-#!/usr/bin/env python
-
-# Copyright (c) 2009 Google Inc. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import sys
-
-# TODO(mark): sys.path manipulation is some temporary testing stuff.
-try:
- import gyp
-except ImportError, e:
- import os.path
- sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
- import gyp
-
-def MonkeyBuildFileTargets(target_list, build_file):
- """From a target_list, returns the subset from the specified build_file.
- """
- build_file = build_file.replace('/', '\\\\')
- return [p for p in target_list if gyp.common.BuildFile(p) == build_file]
-gyp.common.BuildFileTargets = MonkeyBuildFileTargets
-
-import gyp.generator.make
-import os
-def Monkey_ITIP(self):
- """Returns the location of the final output for an installable target."""
- sep = os.path.sep
- # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
- # rely on this. Emulate this behavior for mac.
- if (self.type == 'shared_library' and
- (self.flavor != 'mac' or self.toolset != 'target')):
- # Install all shared libs into a common directory (per toolset) for
- # convenient access with LD_LIBRARY_PATH.
- return '\$(builddir)%slib.%s%s%s' % (sep, self.toolset, sep, self.alias)
- return '\$(builddir)' + sep + self.alias
-gyp.generator.make.MakefileWriter._InstallableTargetInstallPath = Monkey_ITIP
-
-if __name__ == '__main__':
- sys.exit(gyp.main(sys.argv[1:]))
-EOF
-
-# Delete old generated Makefiles.
-find out -name '*.mk' -or -name 'Makefile*' -exec rm {} \;
-
-# Generate fresh Makefiles.
-mv tools/gyp/gyp tools/gyp/gyp.original
-mv tools/gyp/gyp.mingw tools/gyp/gyp
-make out/Makefile.ia32
-mv tools/gyp/gyp tools/gyp/gyp.mingw
-mv tools/gyp/gyp.original tools/gyp/gyp
-
-# Patch generated Makefiles: replace most backslashes with forward slashes,
-# fix library names in linker flags.
-FILES=$(find out -name '*.mk' -or -name 'Makefile*')
-for F in $FILES ; do
- echo "Patching $F..."
- cp $F $F.orig
- cat $F.orig \
- | sed -e 's|\([)a-zA-Z0-9]\)\\\([a-zA-Z]\)|\1/\2|g' \
- -e 's|\([)a-zA-Z0-9]\)\\\\\([a-zA-Z]\)|\1/\2|g' \
- -e 's|'%s/n'|'%s\\\\n'|g' \
- -e 's|-lwinmm\.lib|-lwinmm|g' \
- -e 's|-lws2_32\.lib|-lws2_32|g' \
- > $F
- rm $F.orig
-done
diff --git a/deps/v8/tools/mips_toolchain.tar.gz.sha1 b/deps/v8/tools/mips_toolchain.tar.gz.sha1
new file mode 100644
index 0000000000..8d4572336a
--- /dev/null
+++ b/deps/v8/tools/mips_toolchain.tar.gz.sha1
@@ -0,0 +1 @@
+d51b5d903340262d8d13ecd51054c16a901b3cf3
\ No newline at end of file
diff --git a/deps/v8/tools/node/build_gn.py b/deps/v8/tools/node/build_gn.py
index 8ab2a635ea..e95c3491e7 100755
--- a/deps/v8/tools/node/build_gn.py
+++ b/deps/v8/tools/node/build_gn.py
@@ -17,6 +17,7 @@ are represented as 1/0. E.g.
v8_enable_disassembler=0
"""
+import argparse
import os
import subprocess
import sys
@@ -31,52 +32,71 @@ GN_ARGS = [
"use_sysroot = false",
]
-BUILD_SUBDIR = "gn"
+BUILD_TARGET = "v8_monolith"
-# TODO: make this cross-platform.
-GN_SUBDIR = ["buildtools", "linux64", "gn"]
+def FindGn(options):
+ if options.host_os == "linux":
+ os_path = "linux64"
+ elif options.host_os == "mac":
+ os_path = "mac"
+ elif options.host_os == "win":
+ os_path = "win"
+ else:
+ raise "Operating system not supported by GN"
+ return os.path.join(options.v8_path, "buildtools", os_path, "gn")
-def Build(v8_path, build_path, depot_tools, is_debug, build_flags):
+def GenerateBuildFiles(options):
print "Setting GN args."
- lines = []
- lines.extend(GN_ARGS)
- for flag in build_flags:
+ gn = FindGn(options)
+ gn_args = []
+ gn_args.extend(GN_ARGS)
+ for flag in options.flag:
flag = flag.replace("=1", "=true")
flag = flag.replace("=0", "=false")
flag = flag.replace("target_cpu=ia32", "target_cpu=\"x86\"")
- lines.append(flag)
- lines.append("is_debug = %s" % ("true" if is_debug else "false"))
- with open(os.path.join(build_path, "args.gn"), "w") as args_file:
- args_file.write("\n".join(lines))
- gn = os.path.join(v8_path, *GN_SUBDIR)
- subprocess.check_call([gn, "gen", "-C", build_path], cwd=v8_path)
- ninja = os.path.join(depot_tools, "ninja")
+ gn_args.append(flag)
+ if options.mode == "DEBUG":
+ gn_args.append("is_debug = true")
+ else:
+ gn_args.append("is_debug = false")
+
+ if not os.path.isdir(options.build_path):
+ os.makedirs(options.build_path)
+ with open(os.path.join(options.build_path, "args.gn"), "w") as args_file:
+ args_file.write("\n".join(gn_args))
+ subprocess.check_call([gn, "gen", "-C", options.build_path],
+ cwd=options.v8_path)
+
+def Build(options):
print "Building."
- subprocess.check_call([ninja, "-v", "-C", build_path, "v8_monolith"],
- cwd=v8_path)
+ depot_tools = node_common.EnsureDepotTools(options.v8_path, False)
+ ninja = os.path.join(depot_tools, "ninja")
+ subprocess.check_call([ninja, "-v", "-C", options.build_path, BUILD_TARGET],
+ cwd=options.v8_path)
+
+def ParseOptions(args):
+ parser = argparse.ArgumentParser(
+ description="Build %s with GN" % BUILD_TARGET)
+ parser.add_argument("--mode", help="Build mode (Release/Debug)")
+ parser.add_argument("--v8_path", help="Path to V8")
+ parser.add_argument("--build_path", help="Path to build result")
+ parser.add_argument("--flag", help="Translate GYP flag to GN",
+ action="append")
+ parser.add_argument("--host_os", help="Current operating system")
+ options = parser.parse_args(args)
-def Main(v8_path, build_path, is_debug, build_flags):
- # Verify paths.
- v8_path = os.path.abspath(v8_path)
- assert os.path.isdir(v8_path)
- build_path = os.path.abspath(build_path)
- build_path = os.path.join(build_path, BUILD_SUBDIR)
- if not os.path.isdir(build_path):
- os.makedirs(build_path)
+ assert options.host_os
+ assert options.mode == "Debug" or options.mode == "Release"
- # Check that we have depot tools.
- depot_tools = node_common.EnsureDepotTools(v8_path, False)
+ assert options.v8_path
+ options.v8_path = os.path.abspath(options.v8_path)
+ assert os.path.isdir(options.v8_path)
- # Build with GN.
- Build(v8_path, build_path, depot_tools, is_debug, build_flags)
+ assert options.build_path
+ options.build_path = os.path.abspath(options.build_path)
+ return options
if __name__ == "__main__":
- # TODO: use argparse to parse arguments.
- build_mode = sys.argv[1]
- v8_path = sys.argv[2]
- build_path = sys.argv[3]
- assert build_mode == "Debug" or build_mode == "Release"
- is_debug = build_mode == "Debug"
- # TODO: introduce "--" flag for pass-through flags.
- build_flags = sys.argv[4:]
- Main(v8_path, build_path, is_debug, build_flags)
+ options = ParseOptions(sys.argv[1:])
+ GenerateBuildFiles(options)
+ Build(options)
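
The --flag option keeps accepting GYP-style 1/0 values; GenerateBuildFiles
rewrites them into GN syntax. A standalone sketch of that translation
(hypothetical helper name, mirroring the replaces above):

    def translate_flag(flag):
      flag = flag.replace("=1", "=true")
      flag = flag.replace("=0", "=false")
      return flag.replace("target_cpu=ia32", 'target_cpu="x86"')

    print(translate_flag("v8_enable_disassembler=0"))
    # v8_enable_disassembler=false
    print(translate_flag("target_cpu=ia32"))
    # target_cpu="x86"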
diff --git a/deps/v8/tools/node/fetch_deps.py b/deps/v8/tools/node/fetch_deps.py
index a3e6d74917..09a4e6cb97 100755
--- a/deps/v8/tools/node/fetch_deps.py
+++ b/deps/v8/tools/node/fetch_deps.py
@@ -32,7 +32,6 @@ GCLIENT_SOLUTION = [
"v8/test/mozilla/data" : None,
"v8/test/test262/data" : None,
"v8/test/test262/harness" : None,
- "v8/test/wasm-js" : None,
"v8/third_party/android_tools" : None,
"v8/third_party/catapult" : None,
"v8/third_party/colorama/src" : None,
@@ -41,9 +40,6 @@ GCLIENT_SOLUTION = [
"v8/tools/luci-go" : None,
"v8/tools/swarming_client" : None,
},
- "custom_vars": {
- "build_for_node" : True,
- },
},
]
@@ -56,6 +52,8 @@ def EnsureGit(v8_path):
return False
print "Initializing temporary git repository in v8."
subprocess.check_call(["git", "init"], cwd=v8_path)
+ subprocess.check_call(["git", "config", "user.name", "\"Ada Lovelace\""], cwd=v8_path)
+ subprocess.check_call(["git", "config", "user.email", "\"ada@lovela.ce\""], cwd=v8_path)
subprocess.check_call(["git", "commit", "--allow-empty", "-m", "init"],
cwd=v8_path)
return True
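
The two new `git config` calls give the throwaway repository an identity so
that the empty init commit succeeds on machines without a global git config.
A minimal sketch of the resulting sequence (the path is a placeholder):

    import subprocess

    def init_throwaway_repo(v8_path):
      for args in [["init"],
                   ["config", "user.name", "Ada Lovelace"],
                   ["config", "user.email", "ada@lovela.ce"],
                   ["commit", "--allow-empty", "-m", "init"]]:
        subprocess.check_call(["git"] + args, cwd=v8_path)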
diff --git a/deps/v8/tools/node/update_node.py b/deps/v8/tools/node/update_node.py
index ebd953a903..5d7e4daff4 100755
--- a/deps/v8/tools/node/update_node.py
+++ b/deps/v8/tools/node/update_node.py
@@ -54,6 +54,9 @@ ADD_TO_GITIGNORE = [ "/testing/gtest/*",
"!/third_party/jinja2",
"!/third_party/markupsafe" ]
+# Node.js owns deps/v8/gypfiles in their downstream repository.
+FILES_TO_KEEP = [ "gypfiles" ]
+
def RunGclient(path):
assert os.path.isdir(path)
print ">> Running gclient sync"
@@ -73,7 +76,7 @@ def CommitPatch(options):
cwd=options.v8_path,
)
-def UpdateTarget(repository, options):
+def UpdateTarget(repository, options, files_to_keep):
source = os.path.join(options.v8_path, *repository)
target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
print ">> Updating target directory %s" % target
@@ -83,16 +86,24 @@ def UpdateTarget(repository, options):
# Remove possible remnants of previous incomplete runs.
node_common.UninitGit(target)
- git_commands = [
- ["git", "init"], # initialize target repo
- ["git", "remote", "add", "origin", source], # point to the source repo
- ["git", "fetch", "origin", "HEAD"], # sync to the current branch
- ["git", "reset", "--hard", "FETCH_HEAD"], # reset to the current branch
- ["git", "clean", "-fd"], # delete removed files
- ]
+ git_args = []
+ git_args.append(["init"]) # initialize target repo
+
+ if files_to_keep:
+ git_args.append(["add"] + files_to_keep) # add and commit
+ git_args.append(["commit", "-m", "keep files"]) # files we want to keep
+
+ git_args.append(["remote", "add", "source", source]) # point to source repo
+ git_args.append(["fetch", "source", "HEAD"]) # sync to current branch
+ git_args.append(["checkout", "-f", "FETCH_HEAD"]) # switch to that branch
+ git_args.append(["clean", "-fd"]) # delete removed files
+
+ if files_to_keep:
+ git_args.append(["cherry-pick", "master"]) # restore kept files
+
try:
- for command in git_commands:
- subprocess.check_call(command, cwd=target)
+ for args in git_args:
+ subprocess.check_call(["git"] + args, cwd=target)
except:
raise
finally:
@@ -155,11 +166,11 @@ def Main(args):
if options.with_patch:
CommitPatch(options)
# Update main V8 repository.
- UpdateTarget([""], options)
+ UpdateTarget([""], options, FILES_TO_KEEP)
# Patch .gitignore before updating sub-repositories.
UpdateGitIgnore(options)
for repo in SUB_REPOSITORIES:
- UpdateTarget(repo, options)
+ UpdateTarget(repo, options, None)
if options.commit:
CreateCommit(options)
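
The files_to_keep flow in UpdateTarget reduces to a short git sequence:
commit the kept files onto a fresh master, check out the source's HEAD, then
cherry-pick the kept files back on top. A hedged standalone sketch
(function name and paths are placeholders):

    import subprocess

    def sync_keeping_files(source, target, files_to_keep):
      seq = [["init"]]
      if files_to_keep:
        seq.append(["add"] + files_to_keep)        # commit the files we keep
        seq.append(["commit", "-m", "keep files"])
      seq += [["remote", "add", "source", source],
              ["fetch", "source", "HEAD"],
              ["checkout", "-f", "FETCH_HEAD"],
              ["clean", "-fd"]]
      if files_to_keep:
        seq.append(["cherry-pick", "master"])      # restore the kept files
      for args in seq:
        subprocess.check_call(["git"] + args, cwd=target)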
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index 1a49223996..bcee2b8258 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -39,7 +39,6 @@
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
-#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
#include "src/parsing/scanner-character-streams.h"
@@ -59,9 +58,9 @@ class StringResource8 : public v8::String::ExternalOneByteStringResource {
int length_;
};
-std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
- const char* fname, Encoding encoding, int repeat, v8::Isolate* isolate,
- v8::Local<v8::Context> context) {
+v8::base::TimeDelta RunBaselineParser(const char* fname, Encoding encoding,
+ int repeat, v8::Isolate* isolate,
+ v8::Local<v8::Context> context) {
int length = 0;
const byte* source = ReadFileAndRepeat(fname, &length, repeat);
v8::Local<v8::String> source_handle;
@@ -87,42 +86,21 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
break;
}
}
- v8::base::TimeDelta parse_time1, parse_time2;
+ v8::base::TimeDelta parse_time1;
Handle<Script> script =
reinterpret_cast<i::Isolate*>(isolate)->factory()->NewScript(
v8::Utils::OpenHandle(*source_handle));
- i::ScriptData* cached_data_impl = NULL;
- // First round of parsing (produce data to cache).
- {
- ParseInfo info(script);
- info.set_cached_data(&cached_data_impl);
- info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
- v8::base::ElapsedTimer timer;
- timer.Start();
- bool success =
- parsing::ParseProgram(&info, reinterpret_cast<i::Isolate*>(isolate));
- parse_time1 = timer.Elapsed();
- if (!success) {
- fprintf(stderr, "Parsing failed\n");
- return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta());
- }
- }
- // Second round of parsing (consume cached data).
- {
- ParseInfo info(script);
- info.set_cached_data(&cached_data_impl);
- info.set_compile_options(v8::ScriptCompiler::kConsumeParserCache);
- v8::base::ElapsedTimer timer;
- timer.Start();
- bool success =
- parsing::ParseProgram(&info, reinterpret_cast<i::Isolate*>(isolate));
- parse_time2 = timer.Elapsed();
- if (!success) {
- fprintf(stderr, "Parsing failed\n");
- return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta());
- }
+ ParseInfo info(script);
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ bool success =
+ parsing::ParseProgram(&info, reinterpret_cast<i::Isolate*>(isolate));
+ parse_time1 = timer.Elapsed();
+ if (!success) {
+ fprintf(stderr, "Parsing failed\n");
+ return v8::base::TimeDelta();
}
- return std::make_pair(parse_time1, parse_time2);
+ return parse_time1;
}
@@ -167,19 +145,14 @@ int main(int argc, char* argv[]) {
{
v8::Context::Scope scope(context);
double first_parse_total = 0;
- double second_parse_total = 0;
for (size_t i = 0; i < fnames.size(); i++) {
- std::pair<v8::base::TimeDelta, v8::base::TimeDelta> time =
- RunBaselineParser(fnames[i].c_str(), encoding, repeat, isolate,
- context);
- first_parse_total += time.first.InMillisecondsF();
- second_parse_total += time.second.InMillisecondsF();
+ v8::base::TimeDelta time = RunBaselineParser(
+ fnames[i].c_str(), encoding, repeat, isolate, context);
+ first_parse_total += time.InMillisecondsF();
}
if (benchmark.empty()) benchmark = "Baseline";
- printf("%s(FirstParseRunTime): %.f ms\n", benchmark.c_str(),
+ printf("%s(ParseRunTime): %.f ms\n", benchmark.c_str(),
first_parse_total);
- printf("%s(SecondParseRunTime): %.f ms\n", benchmark.c_str(),
- second_parse_total);
}
}
v8::V8::Dispose();
diff --git a/deps/v8/tools/parser-shell.gyp b/deps/v8/tools/parser-shell.gyp
deleted file mode 100644
index 9b94888edf..0000000000
--- a/deps/v8/tools/parser-shell.gyp
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'v8_code': 1,
- 'v8_enable_i18n_support%': 1,
- },
- 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
- 'targets': [
- {
- 'target_name': 'parser-shell',
- 'type': 'executable',
- 'dependencies': [
- '../src/v8.gyp:v8',
- '../src/v8.gyp:v8_libbase',
- '../src/v8.gyp:v8_libplatform',
- ],
- 'conditions': [
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ],
- }],
- ],
- 'include_dirs+': [
- '..',
- ],
- 'sources': [
- 'parser-shell.cc',
- 'shell-utils.h',
- ],
- },
- ],
-}
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 9ac26ddb16..917b6e2383 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -52,6 +52,7 @@ from testrunner.local import utils
# Special LINT rules diverging from default and reason.
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
+# We now run our own header guard check in PRESUBMIT.py.
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
@@ -280,8 +281,7 @@ class SourceProcessor(SourceFileProcessor):
Check that all files include a copyright notice and no trailing whitespaces.
"""
- RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
- '.status', '.gyp', '.gypi']
+ RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', '.status']
def __init__(self):
self.runtime_function_call_pattern = self.CreateRuntimeFunctionCallMatcher()
@@ -331,7 +331,7 @@ class SourceProcessor(SourceFileProcessor):
def IgnoreDir(self, name):
return (super(SourceProcessor, self).IgnoreDir(name) or
- name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources'))
+ name in ('third_party', 'out', 'obj', 'DerivedSources'))
IGNORE_COPYRIGHTS = ['box2d.js',
'cpplint.py',
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index d295e37d64..5dd60df459 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -51,6 +51,7 @@ PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
VERSION_FILE = os.path.join("include", "v8-version.h")
WATCHLISTS_FILE = "WATCHLISTS"
+RELEASE_WORKDIR = "/tmp/v8-release-scripts-work-dir/"
# V8 base directory.
V8_BASE = os.path.dirname(
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index 877d121b49..bf526bf5d8 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -241,11 +241,12 @@ class MergeToBranch(ScriptsBase):
def _Config(self):
return {
"BRANCHNAME": "prepare-merge",
- "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+ "PERSISTFILE_BASENAME": RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
"ALREADY_MERGING_SENTINEL_FILE":
- "/tmp/v8-merge-to-branch-tempfile-already-merging",
- "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
- "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+ RELEASE_WORKDIR + "v8-merge-to-branch-tempfile-already-merging",
+ "TEMPORARY_PATCH_FILE":
+ RELEASE_WORKDIR + "v8-prepare-merge-tempfile-temporary-patch",
+ "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
}
def _Steps(self):
diff --git a/deps/v8/tools/release/roll_merge.py b/deps/v8/tools/release/roll_merge.py
index 2dd43eae3a..44ed858f7e 100755
--- a/deps/v8/tools/release/roll_merge.py
+++ b/deps/v8/tools/release/roll_merge.py
@@ -262,11 +262,13 @@ class RollMerge(ScriptsBase):
def _Config(self):
return {
"BRANCHNAME": "prepare-merge",
- "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+ "PERSISTFILE_BASENAME":
+ RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
"ALREADY_MERGING_SENTINEL_FILE":
- "/tmp/v8-merge-to-branch-tempfile-already-merging",
- "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
- "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+ RELEASE_WORKDIR + "v8-merge-to-branch-tempfile-already-merging",
+ "TEMPORARY_PATCH_FILE":
+ RELEASE_WORKDIR + "v8-prepare-merge-tempfile-temporary-patch",
+ "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
}
def _Steps(self):
diff --git a/deps/v8/tools/release/update_node.py b/deps/v8/tools/release/update_node.py
deleted file mode 100755
index d060e5c615..0000000000
--- a/deps/v8/tools/release/update_node.py
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Use this script to update V8 in a Node.js checkout.
-
-Requirements:
- - Node.js checkout in which V8 should be updated.
- - V8 checkout at the commit to which Node.js should be updated.
-
-Usage:
- $ update_node.py <path_to_v8> <path_to_node>
-
- This will synchronize the content of <path_to_node>/deps/v8 with <path_to_v8>,
- and a few V8 dependencies required in Node.js. It will also update .gitignore
- appropriately.
-
-Optional flags:
- --gclient Run `gclient sync` on the V8 checkout before updating.
- --commit Create commit with the updated V8 in the Node.js checkout.
- --with-patch Also include currently staged files in the V8 checkout.
-"""
-
-import argparse
-import os
-import shutil
-import subprocess
-import sys
-import stat
-
-TARGET_SUBDIR = os.path.join("deps", "v8")
-
-SUB_REPOSITORIES = [ ["base", "trace_event", "common"],
- ["testing", "gtest"],
- ["third_party", "jinja2"],
- ["third_party", "markupsafe"] ]
-
-DELETE_FROM_GITIGNORE = [ "/base",
- "/testing/gtest",
- "/third_party/jinja2",
- "/third_party/markupsafe" ]
-
-# Node.js requires only a single header file from gtest to build V8.
-# Both jinja2 and markupsafe are required to generate part of the inspector.
-ADD_TO_GITIGNORE = [ "/testing/gtest/*",
- "!/testing/gtest/include",
- "/testing/gtest/include/*",
- "!/testing/gtest/include/gtest",
- "/testing/gtest/include/gtest/*",
- "!/testing/gtest/include/gtest/gtest_prod.h",
- "!/third_party/jinja2",
- "!/third_party/markupsafe" ]
-
-def RunGclient(path):
- assert os.path.isdir(path)
- print ">> Running gclient sync"
- subprocess.check_call(["gclient", "sync", "--nohooks"], cwd=path)
-
-def UninitGit(path):
- target = os.path.join(path, ".git")
- if os.path.isdir(target):
- print ">> Cleaning up %s" % path
- def OnRmError(func, path, exec_info):
- # This might happen on Windows
- os.chmod(path, stat.S_IWRITE)
- os.unlink(path)
- shutil.rmtree(target, onerror=OnRmError)
-
-def CommitPatch(options):
- """Makes a dummy commit for the changes in the index.
-
- On trybots, bot_update applies the patch to the index. We commit it to make
- the fake git clone fetch it into node.js. We can leave the commit, as
- bot_update will ensure a clean state on each run.
- """
- print ">> Committing patch"
- subprocess.check_call(
- ["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
- "commit", "--allow-empty", "-m", "placeholder-commit"],
- cwd=options.v8_path,
- )
-
-def UpdateTarget(repository, options):
- source = os.path.join(options.v8_path, *repository)
- target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
- print ">> Updating target directory %s" % target
- print ">> from active branch at %s" % source
- if not os.path.exists(target):
- os.makedirs(target)
- # Remove possible remnants of previous incomplete runs.
- UninitGit(target)
-
- git_commands = [
- ["git", "init"], # initialize target repo
- ["git", "remote", "add", "origin", source], # point to the source repo
- ["git", "fetch", "origin", "HEAD"], # sync to the current branch
- ["git", "reset", "--hard", "FETCH_HEAD"], # reset to the current branch
- ["git", "clean", "-fd"], # delete removed files
- ]
- try:
- for command in git_commands:
- subprocess.check_call(command, cwd=target)
- except:
- raise
- finally:
- UninitGit(target)
-
-def UpdateGitIgnore(options):
- file_name = os.path.join(options.node_path, TARGET_SUBDIR, ".gitignore")
- assert os.path.isfile(file_name)
- print ">> Updating .gitignore with lines"
- with open(file_name) as gitignore:
- content = gitignore.readlines()
- content = [x.strip() for x in content]
- for x in DELETE_FROM_GITIGNORE:
- if x in content:
- print "- %s" % x
- content.remove(x)
- for x in ADD_TO_GITIGNORE:
- if x not in content:
- print "+ %s" % x
- content.append(x)
- content.sort(key=lambda x: x[1:] if x.startswith("!") else x)
- with open(file_name, "w") as gitignore:
- for x in content:
- gitignore.write("%s\n" % x)
-
-def CreateCommit(options):
- print ">> Creating commit."
- # Find git hash from source.
- githash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"],
- cwd=options.v8_path).strip()
- # Create commit at target.
- git_commands = [
- ["git", "checkout", "-b", "update_v8_to_%s" % githash], # new branch
- ["git", "add", "."], # add files
- ["git", "commit", "-m", "Update V8 to %s" % githash] # new commit
- ]
- for command in git_commands:
- subprocess.check_call(command, cwd=options.node_path)
-
-def ParseOptions(args):
- parser = argparse.ArgumentParser(description="Update V8 in Node.js")
- parser.add_argument("v8_path", help="Path to V8 checkout")
- parser.add_argument("node_path", help="Path to Node.js checkout")
- parser.add_argument("--gclient", action="store_true", help="Run gclient sync")
- parser.add_argument("--commit", action="store_true", help="Create commit")
- parser.add_argument("--with-patch", action="store_true",
- help="Apply also staged files")
- options = parser.parse_args(args)
- assert os.path.isdir(options.v8_path)
- options.v8_path = os.path.abspath(options.v8_path)
- assert os.path.isdir(options.node_path)
- options.node_path = os.path.abspath(options.node_path)
- return options
-
-def Main(args):
- options = ParseOptions(args)
- if options.gclient:
- RunGclient(options.v8_path)
- # Commit patch on trybots to main V8 repository.
- if options.with_patch:
- CommitPatch(options)
- # Update main V8 repository.
- UpdateTarget([""], options)
- # Patch .gitignore before updating sub-repositories.
- UpdateGitIgnore(options)
- for repo in SUB_REPOSITORIES:
- UpdateTarget(repo, options)
- if options.commit:
- CreateCommit(options)
-
-if __name__ == "__main__":
- Main(sys.argv[1:])
diff --git a/deps/v8/tools/run-deopt-fuzzer.gyp b/deps/v8/tools/run-deopt-fuzzer.gyp
deleted file mode 100644
index 9eb6b538bc..0000000000
--- a/deps/v8/tools/run-deopt-fuzzer.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'run_deopt_fuzzer_run',
- 'type': 'none',
- 'dependencies': [
- '../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'run-deopt-fuzzer.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/run-deopt-fuzzer.isolate b/deps/v8/tools/run-deopt-fuzzer.isolate
deleted file mode 100644
index 196fb5dbbc..0000000000
--- a/deps/v8/tools/run-deopt-fuzzer.isolate
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'variables': {
- 'command': [
- 'run-deopt-fuzzer.py',
- ],
- 'files': [
- 'run-deopt-fuzzer.py',
- ],
- },
- 'includes': [
- 'testrunner/testrunner.isolate',
- '../src/d8.isolate',
- '../test/mjsunit/mjsunit.isolate',
- '../test/webkit/webkit.isolate',
- ],
-}
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
deleted file mode 100755
index ac2344b530..0000000000
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-import sys
-
-from testrunner import deopt_fuzzer
-
-
-if __name__ == "__main__":
- sys.exit(deopt_fuzzer.DeoptFuzzer().execute())
diff --git a/deps/v8/tools/run-num-fuzzer.gyp b/deps/v8/tools/run-num-fuzzer.gyp
deleted file mode 100644
index bd3b9d6423..0000000000
--- a/deps/v8/tools/run-num-fuzzer.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'run_num_fuzzer_run',
- 'type': 'none',
- 'dependencies': [
- '../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'run-num-fuzzer.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/run-num-fuzzer.isolate b/deps/v8/tools/run-num-fuzzer.isolate
index d0aca421a7..e9acbd4cb0 100644
--- a/deps/v8/tools/run-num-fuzzer.isolate
+++ b/deps/v8/tools/run-num-fuzzer.isolate
@@ -4,11 +4,10 @@
{
'variables': {
'command': [
- 'run-deopt-fuzzer.py',
+ 'run-num-fuzzer.py',
],
'files': [
- 'run-deopt-fuzzer.py',
- 'run-gc-fuzzer.py',
+ 'run-num-fuzzer.py',
],
},
'includes': [
diff --git a/deps/v8/tools/run-gc-fuzzer.py b/deps/v8/tools/run-num-fuzzer.py
index 6311d4fd29..9b5a065158 100755
--- a/deps/v8/tools/run-gc-fuzzer.py
+++ b/deps/v8/tools/run-num-fuzzer.py
@@ -1,14 +1,14 @@
#!/usr/bin/env python
#
-# Copyright 2017 the V8 project authors. All rights reserved.
+# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
-from testrunner import gc_fuzzer
+from testrunner import num_fuzzer
if __name__ == "__main__":
- sys.exit(gc_fuzzer.GCFuzzer().execute())
+ sys.exit(num_fuzzer.NumFuzzer().execute())
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 3823eb510c..e19f6a056b 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -1116,7 +1116,8 @@ def Main(args):
# Traverse graph/trace tree and iterate over all runnables.
for runnable in FlattenRunnables(root, NodeCB):
runnable_name = "/".join(runnable.graphs)
- if not runnable_name.startswith(options.filter):
+ if (not runnable_name.startswith(options.filter) and
+ runnable_name + "/" != options.filter):
continue
print ">>> Running suite: %s" % runnable_name
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 8fc09eed7b..7721360e2a 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -5,8 +5,10 @@
from collections import OrderedDict
import json
+import multiprocessing
import optparse
import os
+import shlex
import sys
@@ -17,10 +19,14 @@ sys.path.insert(
os.path.dirname(os.path.abspath(__file__))))
-from local import testsuite
-from local import utils
-
-from testproc.shard import ShardProc
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.test_config import TestConfig
+from testrunner.testproc import progress
+from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.shard import ShardProc
+from testrunner.testproc.sigproc import SignalProc
+from testrunner.testproc.timeout import TimeoutProc
BASE_DIR = (
@@ -31,8 +37,6 @@ BASE_DIR = (
DEFAULT_OUT_GN = 'out.gn'
-ARCH_GUESS = utils.DefaultArch()
-
# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
@@ -90,6 +94,16 @@ TEST_MAP = {
],
}
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+ "mips",
+ "mipsel",
+ "mips64",
+ "mips64el",
+ "s390",
+ "s390x",
+ "arm64"]
+
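
Roughly, the per-test timeout is doubled on these architectures before the
mode's own scale factor applies (a sketch under that assumption; the exact
plumbing lives in the test processors):

    SLOW_ARCHS = ["arm", "mips", "mipsel", "mips64", "mips64el",
                  "s390", "s390x", "arm64"]

    def scaled_timeout(base_timeout, arch, mode_scalefactor):
      if arch in SLOW_ARCHS:
        base_timeout *= 2
      return base_timeout * mode_scalefactor

    print(scaled_timeout(60, "arm64", 4))  # 480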
class ModeConfig(object):
def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
@@ -138,6 +152,12 @@ MODES = {
),
}
+PROGRESS_INDICATORS = {
+ 'verbose': progress.VerboseProgressIndicator,
+ 'dots': progress.DotsProgressIndicator,
+ 'color': progress.ColorProgressIndicator,
+ 'mono': progress.MonochromeProgressIndicator,
+}
class TestRunnerError(Exception):
pass
@@ -162,6 +182,10 @@ class BuildConfig(object):
self.predictable = build_config['v8_enable_verify_predictable']
self.tsan = build_config['is_tsan']
self.ubsan_vptr = build_config['is_ubsan_vptr']
+ # Export only for MIPS target
+ if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
+ self.mips_arch_variant = build_config['mips_arch_variant']
+ self.mips_use_msa = build_config['mips_use_msa']
def __str__(self):
detected_options = []
@@ -204,6 +228,10 @@ class BaseTestRunner(object):
try:
parser = self._create_parser()
options, args = self._parse_args(parser, sys_args)
+ if options.swarming:
+ # Swarming doesn't print how isolated commands are called. Let's make
+ # this less cryptic by printing it ourselves.
+ print ' '.join(sys.argv)
self._load_build_config(options)
@@ -215,14 +243,19 @@ class BaseTestRunner(object):
raise
args = self._parse_test_args(args)
- suites = self._get_suites(args, options.verbose)
+ suites = self._get_suites(args, options)
+ self._prepare_suites(suites, options)
self._setup_env()
- return self._do_execute(suites, args, options)
+
+ print(">>> Running tests for %s.%s" % (self.build_config.arch,
+ self.mode_name))
+ tests = [t for s in suites for t in s.tests]
+ return self._do_execute(tests, args, options)
except TestRunnerError:
- return 1
+ return utils.EXIT_CODE_INTERNAL_ERROR
except KeyboardInterrupt:
- return 2
+ return utils.EXIT_CODE_INTERRUPTED
def _create_parser(self):
parser = optparse.OptionParser()
@@ -247,14 +280,63 @@ class BaseTestRunner(object):
" and buildbot builds): %s" % MODES.keys())
parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
"directory will be used")
- parser.add_option("-v", "--verbose", help="Verbose output",
- default=False, action="store_true")
- parser.add_option("--shard-count",
- help="Split tests into this number of shards",
- default=1, type="int")
- parser.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
+ parser.add_option("--total-timeout-sec", default=0, type="int",
+ help="How long should fuzzer run")
+ parser.add_option("--swarming", default=False, action="store_true",
+ help="Indicates running test driver on swarming.")
+
+ parser.add_option("-j", help="The number of parallel tasks to run",
+ default=0, type=int)
+
+ # Shard
+ parser.add_option("--shard-count", default=1, type=int,
+ help="Split tests into this number of shards")
+ parser.add_option("--shard-run", default=1, type=int,
+ help="Run this shard from the split up tests.")
+
+ # Progress
+ parser.add_option("-p", "--progress",
+ choices=PROGRESS_INDICATORS.keys(), default="mono",
+ help="The style of progress indicator (verbose, dots, "
+ "color, mono)")
+ parser.add_option("--json-test-results",
+ help="Path to a file for storing json results.")
+ parser.add_option("--junitout", help="File name of the JUnit output")
+ parser.add_option("--junittestsuite", default="v8tests",
+ help="The testsuite name in the JUnit output file")
+
+ # Rerun
+ parser.add_option("--rerun-failures-count", default=0, type=int,
+ help="Number of times to rerun each failing test case. "
+ "Very slow tests will be rerun only once.")
+ parser.add_option("--rerun-failures-max", default=100, type=int,
+ help="Maximum number of failing test cases to rerun")
+
+ # Test config
+ parser.add_option("--command-prefix", default="",
+ help="Prepended to each shell command used to run a test")
+ parser.add_option("--extra-flags", action="append", default=[],
+ help="Additional flags to pass to each test command")
+ parser.add_option("--isolates", action="store_true", default=False,
+ help="Whether to test isolates")
+ parser.add_option("--no-harness", "--noharness",
+ default=False, action="store_true",
+ help="Run without test harness of a given suite")
+ parser.add_option("--random-seed", default=0, type=int,
+ help="Default seed for initializing random generator")
+ parser.add_option("-t", "--timeout", default=60, type=int,
+ help="Timeout for single test in seconds")
+ parser.add_option("-v", "--verbose", default=False, action="store_true",
+ help="Verbose output")
+
+ # TODO(machenbach): Temporary options for rolling out new test runner
+ # features.
+ parser.add_option("--mastername", default='',
+ help="Mastername property from infrastructure. Not "
+ "setting this option indicates manual usage.")
+ parser.add_option("--buildername", default='',
+ help="Buildername property from infrastructure. Not "
+ "setting this option indicates manual usage.")
def _add_parser_options(self, parser):
pass
@@ -378,6 +460,12 @@ class BaseTestRunner(object):
print('Warning: --shell-dir is deprecated. Searching for executables in '
'build directory (%s) instead.' % self.outdir)
+ if options.j == 0:
+ options.j = multiprocessing.cpu_count()
+
+ options.command_prefix = shlex.split(options.command_prefix)
+ options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+
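For reference, the option post-processing above relies on two standard-library behaviors: shlex.split tokenizes a shell-like string, and sum(lists, []) flattens the per-occurrence token lists produced by the repeatable --extra-flags option. A minimal sketch with illustrative flag values:

import shlex

extra_flags = ['--foo --bar=1', '--baz']       # Two --extra-flags occurrences.
print shlex.split('gdb --args')                # ['gdb', '--args']
print sum(map(shlex.split, extra_flags), [])   # ['--foo', '--bar=1', '--baz']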
def _buildbot_to_v8_mode(self, config):
"""Convert buildbot build configs to configs understood by the v8 runner.
@@ -471,9 +559,9 @@ class BaseTestRunner(object):
return reduce(list.__add__, map(expand_test_group, args), [])
- def _get_suites(self, args, verbose=False):
+ def _get_suites(self, args, options):
names = self._args_to_suite_names(args)
- return self._load_suites(names, verbose)
+ return self._load_suites(names, options)
def _args_to_suite_names(self, args):
# Use default tests if no test configuration was provided at the cmd line.
@@ -484,21 +572,100 @@ class BaseTestRunner(object):
def _get_default_suite_names(self):
return []
- def _expand_test_group(self, name):
- return TEST_MAP.get(name, [name])
-
- def _load_suites(self, names, verbose=False):
+ def _load_suites(self, names, options):
+ test_config = self._create_test_config(options)
def load_suite(name):
- if verbose:
+ if options.verbose:
print '>>> Loading test suite: %s' % name
return testsuite.TestSuite.LoadTestSuite(
- os.path.join(self.basedir, 'test', name))
+ os.path.join(self.basedir, 'test', name),
+ test_config)
return map(load_suite, names)
+ def _prepare_suites(self, suites, options):
+ self._load_status_files(suites, options)
+ for s in suites:
+ s.ReadTestCases()
+
+ def _load_status_files(self, suites, options):
+ # simd_mips is true if SIMD is fully supported on MIPS
+ variables = self._get_statusfile_variables(options)
+ for s in suites:
+ s.ReadStatusFile(variables)
+
+ def _get_statusfile_variables(self, options):
+ simd_mips = (
+ self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
+ self.build_config.mips_arch_variant == "r6" and
+ self.build_config.mips_use_msa)
+
+ # TODO(all): Combine "simulator" and "simulator_run".
+ # TODO(machenbach): In GN we can derive simulator run from
+ # target_arch != v8_target_arch in the dumped build config.
+ return {
+ "arch": self.build_config.arch,
+ "asan": self.build_config.asan,
+ "byteorder": sys.byteorder,
+ "dcheck_always_on": self.build_config.dcheck_always_on,
+ "deopt_fuzzer": False,
+ "endurance_fuzzer": False,
+ "gc_fuzzer": False,
+ "gc_stress": False,
+ "gcov_coverage": self.build_config.gcov_coverage,
+ "isolates": options.isolates,
+ "mode": self.mode_options.status_mode,
+ "msan": self.build_config.msan,
+ "no_harness": options.no_harness,
+ "no_i18n": self.build_config.no_i18n,
+ "no_snap": self.build_config.no_snap,
+ "novfp3": False,
+ "predictable": self.build_config.predictable,
+ "simd_mips": simd_mips,
+ "simulator": utils.UseSimulator(self.build_config.arch),
+ "simulator_run": False,
+ "system": utils.GuessOS(),
+ "tsan": self.build_config.tsan,
+ "ubsan_vptr": self.build_config.ubsan_vptr,
+ }
+
+ def _create_test_config(self, options):
+ timeout = options.timeout * self._timeout_scalefactor(options)
+ return TestConfig(
+ command_prefix=options.command_prefix,
+ extra_flags=options.extra_flags,
+ isolates=options.isolates,
+ mode_flags=self.mode_options.flags,
+ no_harness=options.no_harness,
+ noi18n=self.build_config.no_i18n,
+ random_seed=options.random_seed,
+ shell_dir=self.outdir,
+ timeout=timeout,
+ verbose=options.verbose,
+ )
+
+ def _timeout_scalefactor(self, options):
+ factor = self.mode_options.timeout_scalefactor
+
+ # Simulators are slow, therefore allow a longer timeout.
+ if self.build_config.arch in SLOW_ARCHS:
+ factor *= 2
+
+ # Predictable mode is slower.
+ if self.build_config.predictable:
+ factor *= 2
+
+ return factor
+
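To make the scaling concrete, here is a worked example; the release-mode base factor of 1 is an assumption (the real value comes from the MODES table defined earlier in this file):

factor = 1         # mode_options.timeout_scalefactor, assumed 1 for release.
factor *= 2        # build_config.arch in SLOW_ARCHS, e.g. a simulator build.
factor *= 2        # build_config.predictable builds are slower.
print 60 * factor  # The default 60s --timeout becomes 240s.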
# TODO(majeski): remove options & args parameters
def _do_execute(self, suites, args, options):
raise NotImplementedError()
+ def _prepare_procs(self, procs):
+ procs = filter(None, procs)
+ for i in xrange(0, len(procs) - 1):
+ procs[i].connect_to(procs[i + 1])
+ procs[0].setup()
+
def _create_shard_proc(self, options):
myid, count = self._get_shard_info(options)
if count == 1:
@@ -541,3 +708,29 @@ class BaseTestRunner(object):
return 1, 1
return shard_run, shard_count
+
+ def _create_progress_indicators(self, options):
+ procs = [PROGRESS_INDICATORS[options.progress]()]
+ if options.junitout:
+ procs.append(progress.JUnitTestProgressIndicator(options.junitout,
+ options.junittestsuite))
+ if options.json_test_results:
+ procs.append(progress.JsonTestProgressIndicator(
+ options.json_test_results,
+ self.build_config.arch,
+ self.mode_options.execution_mode))
+ return procs
+
+ def _create_timeout_proc(self, options):
+ if not options.total_timeout_sec:
+ return None
+ return TimeoutProc(options.total_timeout_sec)
+
+ def _create_signal_proc(self):
+ return SignalProc()
+
+ def _create_rerun_proc(self, options):
+ if not options.rerun_failures_count:
+ return None
+ return RerunProc(options.rerun_failures_count,
+ options.rerun_failures_max)
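The _prepare_procs helper above wires the optional processors created by _create_shard_proc, _create_progress_indicators, _create_timeout_proc, _create_signal_proc and _create_rerun_proc into one linear pipeline: disabled entries (None) are dropped, the survivors are chained with connect_to, and the head is set up. A minimal sketch of the pattern; EchoProc and its process method are hypothetical stand-ins for the real testproc classes:

class EchoProc(object):
  def __init__(self, name):
    self._name = name
    self._next = None

  def connect_to(self, next_proc):
    self._next = next_proc

  def setup(self):
    if self._next:
      self._next.setup()

  def process(self, item):
    print '%s saw %s' % (self._name, item)
    if self._next:
      self._next.process(item)

procs = [EchoProc('shard'), None, EchoProc('progress')]  # None: disabled proc.
procs = filter(None, procs)
for i in xrange(0, len(procs) - 1):
  procs[i].connect_to(procs[i + 1])
procs[0].setup()
procs[0].process('mjsunit/foo')  # Flows shard -> progress.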
diff --git a/deps/v8/tools/testrunner/deopt_fuzzer.py b/deps/v8/tools/testrunner/deopt_fuzzer.py
deleted file mode 100755
index 5e6b79f5e9..0000000000
--- a/deps/v8/tools/testrunner/deopt_fuzzer.py
+++ /dev/null
@@ -1,336 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from os.path import join
-import json
-import math
-import multiprocessing
-import os
-import random
-import shlex
-import sys
-import time
-
-# Adds testrunner to the path, hence it has to be imported at the beginning.
-import base_runner
-
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
-from testrunner.local import utils
-from testrunner.local import verbose
-from testrunner.objects import context
-
-
-DEFAULT_SUITES = ["mjsunit", "webkit"]
-TIMEOUT_DEFAULT = 60
-
-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
- "mipsel"]
-MAX_DEOPT = 1000000000
-DISTRIBUTION_MODES = ["smooth", "random"]
-
-
-class DeoptFuzzer(base_runner.BaseTestRunner):
- def __init__(self, *args, **kwargs):
- super(DeoptFuzzer, self).__init__(*args, **kwargs)
-
- class RandomDistribution:
- def __init__(self, seed=None):
- seed = seed or random.randint(1, sys.maxint)
- print "Using random distribution with seed %d" % seed
- self._random = random.Random(seed)
-
- def Distribute(self, n, m):
- if n > m:
- n = m
- return self._random.sample(xrange(1, m + 1), n)
-
- class SmoothDistribution:
- """Distribute n numbers into the interval [1:m].
-    F1: Factor of the first derivative of the distribution function.
-    F2: Factor of the second derivative of the distribution function.
- With F1 and F2 set to 0, the distribution will be equal.
- """
- def __init__(self, factor1=2.0, factor2=0.2):
- self._factor1 = factor1
- self._factor2 = factor2
-
- def Distribute(self, n, m):
- if n > m:
- n = m
- if n <= 1:
- return [ 1 ]
-
- result = []
- x = 0.0
- dx = 1.0
- ddx = self._factor1
- dddx = self._factor2
- for i in range(0, n):
- result += [ x ]
- x += dx
- dx += ddx
- ddx += dddx
-
- # Project the distribution into the interval [0:M].
- result = [ x * m / result[-1] for x in result ]
-
- # Equalize by n. The closer n is to m, the more equal will be the
- # distribution.
- for (i, x) in enumerate(result):
- # The value of x if it was equally distributed.
- equal_x = i / float(n - 1) * float(m - 1) + 1
-
- # Difference factor between actual and equal distribution.
- diff = 1 - (x / equal_x)
-
- # Equalize x dependent on the number of values to distribute.
- result[i] = int(x + (i + 1) * diff)
- return result
-
-
- def _distribution(self, options):
- if options.distribution_mode == "random":
- return self.RandomDistribution(options.seed)
- if options.distribution_mode == "smooth":
- return self.SmoothDistribution(options.distribution_factor1,
- options.distribution_factor2)
-
-
- def _add_parser_options(self, parser):
- parser.add_option("--command-prefix",
- help="Prepended to each shell command used to run a test",
- default="")
- parser.add_option("--coverage", help=("Exponential test coverage "
- "(range 0.0, 1.0) - 0.0: one test, 1.0 all tests (slow)"),
- default=0.4, type="float")
- parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
- "with a small number of deopt points (range 0, inf)"),
- default=20, type="int")
- parser.add_option("--distribution-factor1", help=("Factor of the first "
- "derivation of the distribution function"), default=2.0,
- type="float")
- parser.add_option("--distribution-factor2", help=("Factor of the second "
- "derivation of the distribution function"), default=0.7,
- type="float")
- parser.add_option("--distribution-mode", help=("How to select deopt points "
- "for a given test (smooth|random)"),
- default="smooth")
- parser.add_option("--dump-results-file", help=("Dump maximum number of "
- "deopt points per test to a file"))
- parser.add_option("--extra-flags",
- help="Additional flags to pass to each test command",
- default="")
- parser.add_option("--isolates", help="Whether to test isolates",
- default=False, action="store_true")
- parser.add_option("-j", help="The number of parallel tasks to run",
- default=0, type="int")
- parser.add_option("-p", "--progress",
- help=("The style of progress indicator"
- " (verbose, dots, color, mono)"),
- choices=progress.PROGRESS_INDICATORS.keys(),
- default="mono")
- parser.add_option("--seed", help="The seed for the random distribution",
- type="int")
- parser.add_option("-t", "--timeout", help="Timeout in seconds",
- default= -1, type="int")
- parser.add_option("--random-seed", default=0, dest="random_seed",
- help="Default seed for initializing random generator")
- parser.add_option("--fuzzer-random-seed", default=0,
- help="Default seed for initializing fuzzer random "
- "generator")
- return parser
-
-
- def _process_options(self, options):
- # Special processing of other options, sorted alphabetically.
- options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = shlex.split(options.extra_flags)
- if options.j == 0:
- options.j = multiprocessing.cpu_count()
- while options.random_seed == 0:
- options.random_seed = random.SystemRandom().randint(-2147483648,
- 2147483647)
- if not options.distribution_mode in DISTRIBUTION_MODES:
- print "Unknown distribution mode %s" % options.distribution_mode
- return False
- if options.distribution_factor1 < 0.0:
- print ("Distribution factor1 %s is out of range. Defaulting to 0.0"
- % options.distribution_factor1)
- options.distribution_factor1 = 0.0
- if options.distribution_factor2 < 0.0:
- print ("Distribution factor2 %s is out of range. Defaulting to 0.0"
- % options.distribution_factor2)
- options.distribution_factor2 = 0.0
- if options.coverage < 0.0 or options.coverage > 1.0:
- print ("Coverage %s is out of range. Defaulting to 0.4"
- % options.coverage)
- options.coverage = 0.4
- if options.coverage_lift < 0:
- print ("Coverage lift %s is out of range. Defaulting to 0"
- % options.coverage_lift)
- options.coverage_lift = 0
- return True
-
- def _calculate_n_tests(self, m, options):
- """Calculates the number of tests from m deopt points with exponential
- coverage.
- The coverage is expected to be between 0.0 and 1.0.
- The 'coverage lift' lifts the coverage for tests with smaller m values.
- """
- c = float(options.coverage)
- l = float(options.coverage_lift)
- return int(math.pow(m, (m * c + l) / (m + l)))
-
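As a worked example of the exponential-coverage formula removed above (using this fuzzer's defaults, --coverage=0.4 and --coverage-lift=20): a test with m = 100 deopt points gets 100 ** ((100*0.4 + 20) / (100 + 20)) = 100 ** 0.5 = 10 fuzzing variants.

import math
m, c, l = 100, 0.4, 20.0
print int(math.pow(m, (m * c + l) / (m + l)))  # Prints 10.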
- def _get_default_suite_names(self):
- return DEFAULT_SUITES
-
- def _do_execute(self, suites, args, options):
- print(">>> Running tests for %s.%s" % (self.build_config.arch,
- self.mode_name))
-
- dist = self._distribution(options)
-
- # Populate context object.
- timeout = options.timeout
- if timeout == -1:
- # Simulators are slow, therefore allow a longer default timeout.
- if self.build_config.arch in SLOW_ARCHS:
- timeout = 2 * TIMEOUT_DEFAULT;
- else:
- timeout = TIMEOUT_DEFAULT;
-
- timeout *= self.mode_options.timeout_scalefactor
- ctx = context.Context(self.build_config.arch,
- self.mode_options.execution_mode,
- self.outdir,
- self.mode_options.flags, options.verbose,
- timeout, options.isolates,
- options.command_prefix,
- options.extra_flags,
- False, # Keep i18n on by default.
- options.random_seed,
- True, # No sorting of test cases.
- 0, # Don't rerun failing tests.
- 0, # No use of a rerun-failing-tests maximum.
- False, # No no_harness mode.
- False, # Don't use perf data.
- False) # Coverage not supported.
-
- # Find available test suites and read test cases from them.
- variables = {
- "arch": self.build_config.arch,
- "asan": self.build_config.asan,
- "byteorder": sys.byteorder,
- "dcheck_always_on": self.build_config.dcheck_always_on,
- "deopt_fuzzer": True,
- "gc_fuzzer": False,
- "gc_stress": False,
- "gcov_coverage": self.build_config.gcov_coverage,
- "isolates": options.isolates,
- "mode": self.mode_options.status_mode,
- "msan": self.build_config.msan,
- "no_harness": False,
- "no_i18n": self.build_config.no_i18n,
- "no_snap": self.build_config.no_snap,
- "novfp3": False,
- "predictable": self.build_config.predictable,
- "simulator": utils.UseSimulator(self.build_config.arch),
- "simulator_run": False,
- "system": utils.GuessOS(),
- "tsan": self.build_config.tsan,
- "ubsan_vptr": self.build_config.ubsan_vptr,
- }
- num_tests = 0
- test_id = 0
-
- # Remember test case prototypes for the fuzzing phase.
- test_backup = dict((s, []) for s in suites)
-
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
- if len(args) > 0:
- s.FilterTestCasesByArgs(args)
- s.FilterTestCasesByStatus(False)
-
- test_backup[s] = s.tests
- analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
- "--print-deopt-stress"]
- s.tests = [t.create_variant(t.variant, analysis_flags, 'analysis')
- for t in s.tests]
- num_tests += len(s.tests)
- for t in s.tests:
- t.id = test_id
- t.cmd = t.get_command(ctx)
- test_id += 1
-
- if num_tests == 0:
- print "No tests to run."
- return 0
-
- print(">>> Collection phase")
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
-
- exit_code = runner.Run(options.j)
-
- print(">>> Analysis phase")
- num_tests = 0
- test_id = 0
- for s in suites:
- test_results = {}
- for t in s.tests:
- for line in runner.outputs[t].stdout.splitlines():
- if line.startswith("=== Stress deopt counter: "):
- test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
- for t in s.tests:
- if t.path not in test_results:
- print "Missing results for %s" % t.path
- if options.dump_results_file:
- results_dict = dict((t.path, n) for (t, n) in test_results.iteritems())
- with file("%s.%d.txt" % (options.dump_results_file, time.time()),
- "w") as f:
- f.write(json.dumps(results_dict))
-
- # Reset tests and redistribute the prototypes from the collection phase.
- s.tests = []
- if options.verbose:
- print "Test distributions:"
- for t in test_backup[s]:
- max_deopt = test_results.get(t.path, 0)
- if max_deopt == 0:
- continue
- n_deopt = self._calculate_n_tests(max_deopt, options)
- distribution = dist.Distribute(n_deopt, max_deopt)
- if options.verbose:
- print "%s %s" % (t.path, distribution)
- for n, d in enumerate(distribution):
- fuzzing_flags = ["--deopt-every-n-times", "%d" % d]
- s.tests.append(t.create_variant(t.variant, fuzzing_flags, n))
- num_tests += len(s.tests)
- for t in s.tests:
- t.id = test_id
- t.cmd = t.get_command(ctx)
- test_id += 1
-
- if num_tests == 0:
- print "No tests to run."
- return exit_code
-
- print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
-
- code = runner.Run(options.j)
- return exit_code or code
-
-
-if __name__ == '__main__':
- sys.exit(DeoptFuzzer().execute())
diff --git a/deps/v8/tools/testrunner/gc_fuzzer.py b/deps/v8/tools/testrunner/gc_fuzzer.py
deleted file mode 100755
index 18be227d98..0000000000
--- a/deps/v8/tools/testrunner/gc_fuzzer.py
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from os.path import join
-import itertools
-import json
-import math
-import multiprocessing
-import os
-import random
-import shlex
-import sys
-import time
-
-# Adds testrunner to the path, hence it has to be imported at the beginning.
-import base_runner
-
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
-from testrunner.local import utils
-from testrunner.local import verbose
-from testrunner.objects import context
-
-
-DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
-TIMEOUT_DEFAULT = 60
-
-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
- "mipsel"]
-
-
-class GCFuzzer(base_runner.BaseTestRunner):
- def __init__(self, *args, **kwargs):
- super(GCFuzzer, self).__init__(*args, **kwargs)
-
- self.fuzzer_rng = None
-
- def _add_parser_options(self, parser):
- parser.add_option("--command-prefix",
- help="Prepended to each shell command used to run a test",
- default="")
- parser.add_option("--coverage", help=("Exponential test coverage "
- "(range 0.0, 1.0) - 0.0: one test, 1.0 all tests (slow)"),
- default=0.4, type="float")
- parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
- "with a low memory size reached (range 0, inf)"),
- default=20, type="int")
- parser.add_option("--dump-results-file", help="Dump maximum limit reached")
- parser.add_option("--extra-flags",
- help="Additional flags to pass to each test command",
- default="")
- parser.add_option("--isolates", help="Whether to test isolates",
- default=False, action="store_true")
- parser.add_option("-j", help="The number of parallel tasks to run",
- default=0, type="int")
- parser.add_option("-p", "--progress",
- help=("The style of progress indicator"
- " (verbose, dots, color, mono)"),
- choices=progress.PROGRESS_INDICATORS.keys(),
- default="mono")
- parser.add_option("-t", "--timeout", help="Timeout in seconds",
- default= -1, type="int")
- parser.add_option("--random-seed", default=0,
- help="Default seed for initializing random generator")
- parser.add_option("--fuzzer-random-seed", default=0,
- help="Default seed for initializing fuzzer random "
- "generator")
- parser.add_option("--stress-compaction", default=False, action="store_true",
- help="Enable stress_compaction_percentage flag")
-
- parser.add_option("--distribution-factor1", help="DEPRECATED")
- parser.add_option("--distribution-factor2", help="DEPRECATED")
- parser.add_option("--distribution-mode", help="DEPRECATED")
- parser.add_option("--seed", help="DEPRECATED")
- return parser
-
-
- def _process_options(self, options):
- # Special processing of other options, sorted alphabetically.
- options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = shlex.split(options.extra_flags)
- if options.j == 0:
- options.j = multiprocessing.cpu_count()
- while options.random_seed == 0:
- options.random_seed = random.SystemRandom().randint(-2147483648,
- 2147483647)
- while options.fuzzer_random_seed == 0:
- options.fuzzer_random_seed = random.SystemRandom().randint(-2147483648,
- 2147483647)
- self.fuzzer_rng = random.Random(options.fuzzer_random_seed)
- return True
-
- def _calculate_n_tests(self, m, options):
- """Calculates the number of tests from m points with exponential coverage.
- The coverage is expected to be between 0.0 and 1.0.
- The 'coverage lift' lifts the coverage for tests with smaller m values.
- """
- c = float(options.coverage)
- l = float(options.coverage_lift)
- return int(math.pow(m, (m * c + l) / (m + l)))
-
- def _get_default_suite_names(self):
- return DEFAULT_SUITES
-
- def _do_execute(self, suites, args, options):
- print(">>> Running tests for %s.%s" % (self.build_config.arch,
- self.mode_name))
-
- # Populate context object.
- timeout = options.timeout
- if timeout == -1:
- # Simulators are slow, therefore allow a longer default timeout.
- if self.build_config.arch in SLOW_ARCHS:
- timeout = 2 * TIMEOUT_DEFAULT;
- else:
- timeout = TIMEOUT_DEFAULT;
-
- timeout *= self.mode_options.timeout_scalefactor
- ctx = context.Context(self.build_config.arch,
- self.mode_options.execution_mode,
- self.outdir,
- self.mode_options.flags, options.verbose,
- timeout, options.isolates,
- options.command_prefix,
- options.extra_flags,
- False, # Keep i18n on by default.
- options.random_seed,
- True, # No sorting of test cases.
- 0, # Don't rerun failing tests.
- 0, # No use of a rerun-failing-tests maximum.
- False, # No no_harness mode.
- False, # Don't use perf data.
- False) # Coverage not supported.
-
- num_tests = self._load_tests(args, options, suites, ctx)
- if num_tests == 0:
- print "No tests to run."
- return 0
-
- test_backup = dict(map(lambda s: (s, s.tests), suites))
-
- print('>>> Collection phase')
- for s in suites:
- analysis_flags = ['--fuzzer-gc-analysis']
- s.tests = map(lambda t: t.create_variant(t.variant, analysis_flags,
- 'analysis'),
- s.tests)
- for t in s.tests:
- t.cmd = t.get_command(ctx)
-
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
- exit_code = runner.Run(options.j)
-
- print('>>> Analysis phase')
- test_results = dict()
- for s in suites:
- for t in s.tests:
- # Skip failed tests.
- if t.output_proc.has_unexpected_output(runner.outputs[t]):
- print '%s failed, skipping' % t.path
- continue
- max_limit = self._get_max_limit_reached(runner.outputs[t])
- if max_limit:
- test_results[t.path] = max_limit
-
- runner = None
-
- if options.dump_results_file:
- with file("%s.%d.txt" % (options.dump_results_file, time.time()),
- "w") as f:
- f.write(json.dumps(test_results))
-
- num_tests = 0
- for s in suites:
- s.tests = []
- for t in test_backup[s]:
- max_percent = test_results.get(t.path, 0)
- if not max_percent or max_percent < 1.0:
- continue
- max_percent = int(max_percent)
-
- subtests_count = self._calculate_n_tests(max_percent, options)
-
- if options.verbose:
- print ('%s [x%d] (max marking limit=%.02f)' %
- (t.path, subtests_count, max_percent))
- for i in xrange(0, subtests_count):
- fuzzer_seed = self._next_fuzzer_seed()
- fuzzing_flags = [
- '--stress_marking', str(max_percent),
- '--fuzzer_random_seed', str(fuzzer_seed),
- ]
- if options.stress_compaction:
- fuzzing_flags.append('--stress_compaction_random')
- s.tests.append(t.create_variant(t.variant, fuzzing_flags, i))
- for t in s.tests:
- t.cmd = t.get_command(ctx)
- num_tests += len(s.tests)
-
- if num_tests == 0:
- print "No tests to run."
- return exit_code
-
- print(">>> Fuzzing phase (%d test cases)" % num_tests)
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
-
- return runner.Run(options.j) or exit_code
-
- def _load_tests(self, args, options, suites, ctx):
- # Find available test suites and read test cases from them.
- variables = {
- "arch": self.build_config.arch,
- "asan": self.build_config.asan,
- "byteorder": sys.byteorder,
- "dcheck_always_on": self.build_config.dcheck_always_on,
- "deopt_fuzzer": False,
- "gc_fuzzer": True,
- "gc_stress": False,
- "gcov_coverage": self.build_config.gcov_coverage,
- "isolates": options.isolates,
- "mode": self.mode_options.status_mode,
- "msan": self.build_config.msan,
- "no_harness": False,
- "no_i18n": self.build_config.no_i18n,
- "no_snap": self.build_config.no_snap,
- "novfp3": False,
- "predictable": self.build_config.predictable,
- "simulator": utils.UseSimulator(self.build_config.arch),
- "simulator_run": False,
- "system": utils.GuessOS(),
- "tsan": self.build_config.tsan,
- "ubsan_vptr": self.build_config.ubsan_vptr,
- }
-
- num_tests = 0
- test_id = 0
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
- if len(args) > 0:
- s.FilterTestCasesByArgs(args)
- s.FilterTestCasesByStatus(False)
-
- num_tests += len(s.tests)
- for t in s.tests:
- t.id = test_id
- test_id += 1
-
- return num_tests
-
-  # Parses test stdout and returns the highest percentage of the incremental
-  # marking limit that was reached (0-100).
- @staticmethod
- def _get_max_limit_reached(output):
- if not output.stdout:
- return None
-
- for l in reversed(output.stdout.splitlines()):
- if l.startswith('### Maximum marking limit reached ='):
- return float(l.split()[6])
-
- return None
-
- def _next_fuzzer_seed(self):
- fuzzer_seed = None
- while not fuzzer_seed:
- fuzzer_seed = self.fuzzer_rng.randint(-2147483648, 2147483647)
- return fuzzer_seed
-
-
-if __name__ == '__main__':
- sys.exit(GCFuzzer().execute())
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index 93b1ac9497..adc9c2e452 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -4,6 +4,7 @@
import os
+import signal
import subprocess
import sys
import threading
@@ -17,6 +18,19 @@ SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
+def setup_testing():
+ """For testing only: We use threading under the hood instead of
+ multiprocessing to make coverage work. Signal handling is only supported
+ in the main thread, so we disable it for testing.
+ """
+ signal.signal = lambda *_: None
+
+
+class AbortException(Exception):
+ """Indicates early abort on SIGINT, SIGTERM or internal hard timeout."""
+ pass
+
+
class BaseCommand(object):
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
verbose=False):
@@ -35,10 +49,16 @@ class BaseCommand(object):
process = self._start_process(**additional_popen_kwargs)
+ # Variable to communicate with the signal handler.
+ abort_occured = [False]
+ def handler(signum, frame):
+ self._abort(process, abort_occured)
+ signal.signal(signal.SIGTERM, handler)
+
# Variable to communicate with the timer.
timeout_occured = [False]
timer = threading.Timer(
- self.timeout, self._on_timeout, [process, timeout_occured])
+ self.timeout, self._abort, [process, timeout_occured])
timer.start()
start_time = time.time()
@@ -47,6 +67,9 @@ class BaseCommand(object):
timer.cancel()
+ if abort_occured[0]:
+ raise AbortException()
+
return output.Output(
process.returncode,
timeout_occured[0],
@@ -85,12 +108,12 @@ class BaseCommand(object):
def _kill_process(self, process):
raise NotImplementedError()
- def _on_timeout(self, process, timeout_occured):
- timeout_occured[0] = True
+ def _abort(self, process, abort_called):
+ abort_called[0] = True
try:
self._kill_process(process)
except OSError:
- sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+ pass
def __str__(self):
return self.to_string()
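The command.py change above funnels both the per-test timer and the new SIGTERM handler through a single _abort path that sets a shared flag and kills the process. A self-contained sketch of that shared-abort pattern; run_with_abort and its argument handling are illustrative, not part of the runner:

import signal
import subprocess
import threading

def run_with_abort(args, timeout):
  process = subprocess.Popen(args)
  aborted = [False]  # Mutable cell shared between timer and signal handler.

  def abort(proc, flag):
    flag[0] = True
    try:
      proc.kill()
    except OSError:
      pass  # Process already ended.

  signal.signal(signal.SIGTERM, lambda signum, frame: abort(process, aborted))
  timer = threading.Timer(timeout, abort, [process, aborted])
  timer.start()
  process.wait()
  timer.cancel()
  return aborted[0]  # True if the timer fired or SIGTERM arrived.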
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
deleted file mode 100644
index d6d0725365..0000000000
--- a/deps/v8/tools/testrunner/local/execution.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import collections
-import os
-import re
-import shutil
-import sys
-import traceback
-
-from . import command
-from . import perfdata
-from . import statusfile
-from . import utils
-from . pool import Pool
-from ..objects import predictable
-
-
-# Base dir of the v8 checkout.
-BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__)))))
-TEST_DIR = os.path.join(BASE_DIR, "test")
-
-
-# Structure that keeps global information per worker process.
-ProcessContext = collections.namedtuple(
- 'process_context', ['sancov_dir'])
-
-
-TestJobResult = collections.namedtuple(
- 'TestJobResult', ['id', 'outproc_result'])
-
-def MakeProcessContext(sancov_dir):
- return ProcessContext(sancov_dir)
-
-
-# Global function for multiprocessing, because pickling a static method doesn't
-# work on Windows.
-def run_job(job, process_context):
- return job.run(process_context)
-
-
-class Job(object):
- """Stores data to be sent over the multi-process boundary.
-
- All contained fields will be pickled/unpickled.
- """
-
- def run(self, process_context):
- raise NotImplementedError()
-
-
-class TestJob(Job):
- def __init__(self, test_id, cmd, outproc, run_num):
- self.test_id = test_id
- self.cmd = cmd
- self.outproc = outproc
- self.run_num = run_num
-
- def _rename_coverage_data(self, out, sancov_dir):
- """Rename coverage data.
-
- Rename files with PIDs to files with unique test IDs, because the number
- of tests might be higher than pid_max. E.g.:
- d8.1234.sancov -> d8.test.42.1.sancov, where 1234 was the process' PID,
- 42 is the test ID and 1 is the attempt (the same test might be rerun on
- failures).
- """
- if sancov_dir and out.pid is not None:
- # Doesn't work on windows so basename is sufficient to get the shell name.
- shell = os.path.basename(self.cmd.shell)
- sancov_file = os.path.join(sancov_dir, "%s.%d.sancov" % (shell, out.pid))
-
- # Some tests are expected to fail and don't produce coverage data.
- if os.path.exists(sancov_file):
- parts = sancov_file.split(".")
- new_sancov_file = ".".join(
- parts[:-2] +
- ["test", str(self.test_id), str(self.run_num)] +
- parts[-1:]
- )
- assert not os.path.exists(new_sancov_file)
- os.rename(sancov_file, new_sancov_file)
-
- def run(self, context):
- output = self.cmd.execute()
- self._rename_coverage_data(output, context.sancov_dir)
- return TestJobResult(self.test_id, self.outproc.process(output))
-
-
-class Runner(object):
-
- def __init__(self, suites, progress_indicator, context, outproc_factory=None):
- self.datapath = os.path.join("out", "testrunner_data")
- self.perf_data_manager = perfdata.GetPerfDataManager(
- context, self.datapath)
- self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
- self.perf_failures = False
- self.printed_allocations = False
- self.outproc_factory = outproc_factory or (lambda test: test.output_proc)
- self.tests = [t for s in suites for t in s.tests]
-
- # TODO(majeski): Pass dynamically instead of keeping them in the runner.
- # Maybe some observer?
- self.outputs = {t: None for t in self.tests}
-
- self.suite_names = [s.name for s in suites]
-
- # Always pre-sort by status file, slowest tests first.
- self.tests.sort(key=lambda t: t.is_slow, reverse=True)
-
- # Sort by stored duration if not opted out.
- if not context.no_sorting:
- self.tests.sort(key=lambda t: self.perfdata.FetchPerfData(t) or 1.0,
- reverse=True)
-
- self._CommonInit(suites, progress_indicator, context)
-
- def _CommonInit(self, suites, progress_indicator, context):
- self.total = 0
- for s in suites:
- for t in s.tests:
- t.id = self.total
- self.total += 1
- self.indicator = progress_indicator
- progress_indicator.SetRunner(self)
- self.context = context
- self.succeeded = 0
- self.remaining = self.total
- self.failed = []
- self.crashed = 0
- self.reran_tests = 0
-
- def _RunPerfSafe(self, fun):
- try:
- fun()
- except Exception, e:
- print("PerfData exception: %s" % e)
- self.perf_failures = True
-
- def _MaybeRerun(self, pool, test, result):
- if test.run <= self.context.rerun_failures_count:
- # Possibly rerun this test if its run count is below the maximum per
- # test. <= as the flag controls reruns not including the first run.
- if test.run == 1:
- # Count the overall number of reran tests on the first rerun.
- if self.reran_tests < self.context.rerun_failures_max:
- self.reran_tests += 1
- else:
- # Don't rerun this if the overall number of rerun tests has been
- # reached.
- return
- if (test.run >= 2 and
- result.output.duration > self.context.timeout / 20.0):
- # Rerun slow tests at most once.
- return
-
- # Rerun this test.
- test.run += 1
- pool.add([
- TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
- ])
- self.remaining += 1
- self.total += 1
-
- def _ProcessTest(self, test, result, pool):
- self.outputs[test] = result.output
- has_unexpected_output = result.has_unexpected_output
- if has_unexpected_output:
- self.failed.append(test)
- if result.output.HasCrashed():
- self.crashed += 1
- else:
- self.succeeded += 1
- self.remaining -= 1
- # For the indicator, everything that happens after the first run is treated
- # as unexpected even if it flakily passes in order to include it in the
- # output.
- self.indicator.HasRun(test, result.output,
- has_unexpected_output or test.run > 1)
- if has_unexpected_output:
- # Rerun test failures after the indicator has processed the results.
- self._VerbosePrint("Attempting to rerun test after failure.")
- self._MaybeRerun(pool, test, result)
- # Update the perf database if the test succeeded.
- return not has_unexpected_output
-
- def Run(self, jobs):
- self.indicator.Starting()
- self._RunInternal(jobs)
- self.indicator.Done()
- if self.failed:
- return 1
- elif self.remaining:
- return 2
- return 0
-
- def _RunInternal(self, jobs):
- pool = Pool(jobs)
- test_map = {}
- queued_exception = [None]
- def gen_tests():
- for test in self.tests:
- assert test.id >= 0
- test_map[test.id] = test
- try:
- yield [
- TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
- ]
- except Exception, e:
- # If this failed, save the exception and re-raise it later (after
- # all other tests have had a chance to run).
- queued_exception[0] = e, traceback.format_exc()
- continue
- try:
- it = pool.imap_unordered(
- fn=run_job,
- gen=gen_tests(),
- process_context_fn=MakeProcessContext,
- process_context_args=[self.context.sancov_dir],
- )
- for result in it:
- if result.heartbeat:
- self.indicator.Heartbeat()
- continue
-
- job_result = result.value
- test_id = job_result.id
- outproc_result = job_result.outproc_result
-
- test = test_map[test_id]
- update_perf = self._ProcessTest(test, outproc_result, pool)
- if update_perf:
- self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(
- test, outproc_result.output.duration))
- except KeyboardInterrupt:
- raise
- except:
- traceback.print_exc()
- raise
- finally:
- self._VerbosePrint("Closing process pool.")
- pool.terminate()
- self._VerbosePrint("Closing database connection.")
- self._RunPerfSafe(self.perf_data_manager.close)
- if self.perf_failures:
- # Nuke perf data in case of failures. This might not work on windows as
- # some files might still be open.
- print "Deleting perf test data due to db corruption."
- shutil.rmtree(self.datapath)
- if queued_exception[0]:
- e, stacktrace = queued_exception[0]
- print stacktrace
- raise e
-
- def _VerbosePrint(self, text):
- if self.context.verbose:
- print text
- sys.stdout.flush()
-
-
-class BreakNowException(Exception):
- def __init__(self, value):
- super(BreakNowException, self).__init__()
- self.value = value
-
- def __str__(self):
- return repr(self.value)
diff --git a/deps/v8/tools/testrunner/local/perfdata.py b/deps/v8/tools/testrunner/local/perfdata.py
deleted file mode 100644
index 4cb618b0be..0000000000
--- a/deps/v8/tools/testrunner/local/perfdata.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import shelve
-import threading
-
-
-class PerfDataEntry(object):
- def __init__(self):
- self.avg = 0.0
- self.count = 0
-
- def AddResult(self, result):
- kLearnRateLimiter = 99 # Greater value means slower learning.
- # We use an approximation of the average of the last 100 results here:
- # The existing average is weighted with kLearnRateLimiter (or less
- # if there are fewer data points).
- effective_count = min(self.count, kLearnRateLimiter)
- self.avg = self.avg * effective_count + result
- self.count = effective_count + 1
- self.avg /= self.count
-
-
-class PerfDataStore(object):
- def __init__(self, datadir, arch, mode):
- filename = os.path.join(datadir, "%s.%s.perfdata" % (arch, mode))
- self.database = shelve.open(filename, protocol=2)
- self.closed = False
- self.lock = threading.Lock()
-
- def __del__(self):
- self.close()
-
- def close(self):
- if self.closed: return
- self.database.close()
- self.closed = True
-
- def FetchPerfData(self, test):
- """Returns the observed duration for |test| as read from the store."""
- key = test.get_id()
- if key in self.database:
- return self.database[key].avg
- return None
-
- def UpdatePerfData(self, test, duration):
- """Updates the persisted value in the store with duration."""
- testkey = test.get_id()
- self.RawUpdatePerfData(testkey, duration)
-
- def RawUpdatePerfData(self, testkey, duration):
- with self.lock:
- if testkey in self.database:
- entry = self.database[testkey]
- else:
- entry = PerfDataEntry()
- entry.AddResult(duration)
- self.database[testkey] = entry
-
-
-class PerfDataManager(object):
- def __init__(self, datadir):
- self.datadir = os.path.abspath(datadir)
- if not os.path.exists(self.datadir):
- os.makedirs(self.datadir)
- self.stores = {} # Keyed by arch, then mode.
- self.closed = False
- self.lock = threading.Lock()
-
- def __del__(self):
- self.close()
-
- def close(self):
- if self.closed: return
- for arch in self.stores:
- modes = self.stores[arch]
- for mode in modes:
- store = modes[mode]
- store.close()
- self.closed = True
-
- def GetStore(self, arch, mode):
- with self.lock:
- if not arch in self.stores:
- self.stores[arch] = {}
- modes = self.stores[arch]
- if not mode in modes:
- modes[mode] = PerfDataStore(self.datadir, arch, mode)
- return modes[mode]
-
-
-class NullPerfDataStore(object):
- def UpdatePerfData(self, test, duration):
- pass
-
- def FetchPerfData(self, test):
- return None
-
-
-class NullPerfDataManager(object):
- def __init__(self):
- pass
-
- def GetStore(self, *args, **kwargs):
- return NullPerfDataStore()
-
- def close(self):
- pass
-
-
-def GetPerfDataManager(context, datadir):
- if context.use_perf_data:
- return PerfDataManager(datadir)
- else:
- return NullPerfDataManager()
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index 9199b62d8a..7c9a250bc3 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -4,42 +4,38 @@
# found in the LICENSE file.
from Queue import Empty
-from multiprocessing import Event, Process, Queue
+from contextlib import contextmanager
+from multiprocessing import Process, Queue
+import os
+import signal
+import time
import traceback
+from . import command
+
def setup_testing():
"""For testing only: Use threading under the hood instead of multiprocessing
to make coverage work.
"""
global Queue
- global Event
global Process
del Queue
- del Event
del Process
from Queue import Queue
- from threading import Event
from threading import Thread as Process
+ # Monkeypatch threading Queue to look like multiprocessing Queue.
+ Queue.cancel_join_thread = lambda self: None
class NormalResult():
def __init__(self, result):
self.result = result
- self.exception = False
- self.break_now = False
-
+ self.exception = None
class ExceptionResult():
- def __init__(self):
- self.exception = True
- self.break_now = False
-
-
-class BreakResult():
- def __init__(self):
- self.exception = False
- self.break_now = True
+ def __init__(self, exception):
+ self.exception = exception
class MaybeResult():
@@ -56,26 +52,43 @@ class MaybeResult():
return MaybeResult(False, value)
-def Worker(fn, work_queue, done_queue, done,
+def Worker(fn, work_queue, done_queue,
process_context_fn=None, process_context_args=None):
"""Worker to be run in a child process.
- The worker stops on two conditions. 1. When the poison pill "STOP" is
- reached or 2. when the event "done" is set."""
+ The worker stops when the poison pill "STOP" is reached.
+ """
try:
kwargs = {}
if process_context_fn and process_context_args is not None:
kwargs.update(process_context=process_context_fn(*process_context_args))
for args in iter(work_queue.get, "STOP"):
- if done.is_set():
- break
try:
done_queue.put(NormalResult(fn(*args, **kwargs)))
+ except command.AbortException:
+ # SIGINT, SIGTERM or internal hard timeout.
+ break
except Exception, e:
traceback.print_exc()
print(">>> EXCEPTION: %s" % e)
- done_queue.put(ExceptionResult())
+ done_queue.put(ExceptionResult(e))
+ # When we reach here on normal tear down, all items have been pulled from
+ # the done_queue before and this should have no effect. On fast abort, it's
+ # possible that a fast worker left items on the done_queue in memory, which
+ # will never be pulled. This call purges those to avoid a deadlock.
+ done_queue.cancel_join_thread()
except KeyboardInterrupt:
- done_queue.put(BreakResult())
+ assert False, 'Unreachable'
+
+
+@contextmanager
+def without_sig():
+ int_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
+ term_handler = signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ try:
+ yield
+ finally:
+ signal.signal(signal.SIGINT, int_handler)
+ signal.signal(signal.SIGTERM, term_handler)
class Pool():
@@ -88,24 +101,28 @@ class Pool():
# Necessary to not overflow the queue's pipe if a keyboard interrupt happens.
BUFFER_FACTOR = 4
- def __init__(self, num_workers, heartbeat_timeout=30):
+ def __init__(self, num_workers, heartbeat_timeout=1):
self.num_workers = num_workers
self.processes = []
self.terminated = False
+ self.abort_now = False
- # Invariant: count >= #work_queue + #done_queue. It is greater when a
- # worker takes an item from the work_queue and before the result is
+ # Invariant: processing_count >= #work_queue + #done_queue. It is greater
+ # when a worker takes an item from the work_queue and before the result is
# submitted to the done_queue. It is equal when no worker is working,
# e.g. when all workers have finished, and when no results are processed.
# Count is only accessed by the parent process. Only the parent process is
# allowed to remove items from the done_queue and to add items to the
# work_queue.
- self.count = 0
- self.work_queue = Queue()
- self.done_queue = Queue()
- self.done = Event()
+ self.processing_count = 0
self.heartbeat_timeout = heartbeat_timeout
+ # Disable sigint and sigterm to prevent subprocesses from capturing the
+ # signals.
+ with without_sig():
+ self.work_queue = Queue()
+ self.done_queue = Queue()
+
def imap_unordered(self, fn, gen,
process_context_fn=None, process_context_args=None):
"""Maps function "fn" to items in generator "gen" on the worker processes
@@ -123,58 +140,63 @@ class Pool():
process_context_fn. All arguments will be pickled and sent beyond the
process boundary.
"""
+ if self.terminated:
+ return
try:
internal_error = False
gen = iter(gen)
self.advance = self._advance_more
- for w in xrange(self.num_workers):
- p = Process(target=Worker, args=(fn,
- self.work_queue,
- self.done_queue,
- self.done,
- process_context_fn,
- process_context_args))
- p.start()
- self.processes.append(p)
+ # Disable sigint and sigterm to prevent subprocesses from capturing the
+ # signals.
+ with without_sig():
+ for w in xrange(self.num_workers):
+ p = Process(target=Worker, args=(fn,
+ self.work_queue,
+ self.done_queue,
+ process_context_fn,
+ process_context_args))
+ p.start()
+ self.processes.append(p)
self.advance(gen)
- while self.count > 0:
+ while self.processing_count > 0:
while True:
try:
- result = self.done_queue.get(timeout=self.heartbeat_timeout)
- break
- except Empty:
- # Indicate a heartbeat. The iterator will continue fetching the
- # next result.
- yield MaybeResult.create_heartbeat()
- self.count -= 1
- if result.exception:
- # TODO(machenbach): Handle a few known types of internal errors
- # gracefully, e.g. missing test files.
- internal_error = True
- continue
- elif result.break_now:
- # A keyboard interrupt happened in one of the worker processes.
- raise KeyboardInterrupt
- else:
- yield MaybeResult.create_result(result.result)
+ # Read from result queue in a responsive fashion. If available,
+ # this will return a normal result immediately or a heartbeat on
+ # heartbeat timeout (default 1 second).
+ result = self._get_result_from_queue()
+ except:
+ # TODO(machenbach): Handle a few known types of internal errors
+ # gracefully, e.g. missing test files.
+ internal_error = True
+ continue
+
+ if self.abort_now:
+ # SIGINT, SIGTERM or internal hard timeout.
+ return
+
+ yield result
+ break
+
self.advance(gen)
except KeyboardInterrupt:
- raise
+ assert False, 'Unreachable'
except Exception as e:
traceback.print_exc()
print(">>> EXCEPTION: %s" % e)
finally:
- self.terminate()
+ self._terminate()
+
if internal_error:
raise Exception("Internal error in a worker process.")
def _advance_more(self, gen):
- while self.count < self.num_workers * self.BUFFER_FACTOR:
+ while self.processing_count < self.num_workers * self.BUFFER_FACTOR:
try:
self.work_queue.put(gen.next())
- self.count += 1
+ self.processing_count += 1
except StopIteration:
self.advance = self._advance_empty
break
@@ -185,27 +207,51 @@ class Pool():
def add(self, args):
"""Adds an item to the work queue. Can be called dynamically while
processing the results from imap_unordered."""
+ assert not self.terminated
+
self.work_queue.put(args)
- self.count += 1
+ self.processing_count += 1
+
+ def abort(self):
+ """Schedules abort on next queue read.
+
+ This is safe to call when handling SIGINT, SIGTERM or when an internal
+ hard timeout is reached.
+ """
+ self.abort_now = True
- def terminate(self):
+ def _terminate(self):
+ """Terminates execution and cleans up the queues.
+
+ If abort() was called before termination, this also terminates the
+ subprocesses and doesn't wait for ongoing tests.
+ """
if self.terminated:
return
self.terminated = True
- # For exceptional tear down set the "done" event to stop the workers before
- # they empty the queue buffer.
- self.done.set()
+    # Drain any remaining tests out of the work queue.
+ try:
+ while True:
+ self.work_queue.get(True, 0.1)
+ except Empty:
+ pass
- for p in self.processes:
+ # Make sure all processes stop
+ for _ in self.processes:
# During normal tear down the workers block on get(). Feed a poison pill
# per worker to make them stop.
self.work_queue.put("STOP")
+ if self.abort_now:
+ for p in self.processes:
+ os.kill(p.pid, signal.SIGTERM)
+
for p in self.processes:
p.join()
- # Drain the queues to prevent failures when queues are garbage collected.
+ # Drain the queues to prevent stderr chatter when queues are garbage
+ # collected.
try:
while True: self.work_queue.get(False)
except:
@@ -214,3 +260,22 @@ class Pool():
while True: self.done_queue.get(False)
except:
pass
+
+ def _get_result_from_queue(self):
+ """Attempts to get the next result from the queue.
+
+ Returns: A wrapped result if one was available within heartbeat timeout,
+ a heartbeat result otherwise.
+ Raises:
+      Exception: If an exception occurred when processing the task on the
+ worker side, it is reraised here.
+ """
+ while True:
+ try:
+ result = self.done_queue.get(timeout=self.heartbeat_timeout)
+ self.processing_count -= 1
+ if result.exception:
+ raise result.exception
+ return MaybeResult.create_result(result.result)
+ except Empty:
+ return MaybeResult.create_heartbeat()
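Taken together, callers now consume Pool.imap_unordered as a stream of wrapped results interleaved with heartbeats, as the updated unit tests below also show. A minimal usage sketch; square is an illustrative payload, and the import assumes the sys.path setup from pool_unittest.py:

from testrunner.local.pool import Pool

def square(x):
  return x * x

if __name__ == '__main__':
  pool = Pool(2)
  for result in pool.imap_unordered(square, [[x] for x in range(5)]):
    if result.heartbeat:
      continue  # No result within heartbeat_timeout; workers still busy.
    print result.value  # 0, 1, 4, 9, 16 in arbitrary order.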
diff --git a/deps/v8/tools/testrunner/local/pool_unittest.py b/deps/v8/tools/testrunner/local/pool_unittest.py
index 235eca6393..240cd563f8 100644..100755
--- a/deps/v8/tools/testrunner/local/pool_unittest.py
+++ b/deps/v8/tools/testrunner/local/pool_unittest.py
@@ -3,9 +3,16 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import os
+import sys
import unittest
-from pool import Pool
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.local.pool import Pool
def Run(x):
if x == 10:
@@ -17,6 +24,9 @@ class PoolTest(unittest.TestCase):
results = set()
pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+ if result.heartbeat:
+ # Any result can be a heartbeat due to timings.
+ continue
results.add(result.value)
self.assertEquals(set(range(0, 10)), results)
@@ -25,6 +35,9 @@ class PoolTest(unittest.TestCase):
pool = Pool(3)
with self.assertRaises(Exception):
for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
+ if result.heartbeat:
+ # Any result can be a heartbeat due to timings.
+ continue
# Item 10 will not appear in results due to an internal exception.
results.add(result.value)
expect = set(range(0, 12))
@@ -35,8 +48,15 @@ class PoolTest(unittest.TestCase):
results = set()
pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+ if result.heartbeat:
+ # Any result can be a heartbeat due to timings.
+ continue
results.add(result.value)
if result.value < 30:
pool.add([result.value + 20])
self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)),
results)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
deleted file mode 100644
index f6ebddf2e5..0000000000
--- a/deps/v8/tools/testrunner/local/progress.py
+++ /dev/null
@@ -1,452 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-from functools import wraps
-import json
-import os
-import sys
-import time
-
-from . import junit_output
-from . import statusfile
-from ..testproc import progress as progress_proc
-
-
-class ProgressIndicator(object):
-
- def __init__(self):
- self.runner = None
-
- def SetRunner(self, runner):
- self.runner = runner
-
- def Starting(self):
- pass
-
- def Done(self):
- pass
-
- def HasRun(self, test, output, has_unexpected_output):
- pass
-
- def Heartbeat(self):
- pass
-
- def PrintFailureHeader(self, test):
- if test.output_proc.negative:
- negative_marker = '[negative] '
- else:
- negative_marker = ''
- print "=== %(label)s %(negative)s===" % {
- 'label': test,
- 'negative': negative_marker,
- }
-
- def ToProgressIndicatorProc(self):
- print ('Warning: %s is not available as a processor' %
- self.__class__.__name__)
- return None
-
-
-class IndicatorNotifier(object):
- """Holds a list of progress indicators and notifies them all on events."""
- def __init__(self):
- self.indicators = []
-
- def Register(self, indicator):
- self.indicators.append(indicator)
-
- def ToProgressIndicatorProcs(self):
- return [i.ToProgressIndicatorProc() for i in self.indicators]
-
-
-# Forge all generic event-dispatching methods in IndicatorNotifier, which are
-# part of the ProgressIndicator interface.
-for func_name in ProgressIndicator.__dict__:
- func = getattr(ProgressIndicator, func_name)
- if callable(func) and not func.__name__.startswith('_'):
- def wrap_functor(f):
- @wraps(f)
- def functor(self, *args, **kwargs):
- """Generic event dispatcher."""
- for indicator in self.indicators:
- getattr(indicator, f.__name__)(*args, **kwargs)
- return functor
- setattr(IndicatorNotifier, func_name, wrap_functor(func))
-
-
-class SimpleProgressIndicator(ProgressIndicator):
- """Abstract base class for {Verbose,Dots}ProgressIndicator"""
-
- def Starting(self):
- print 'Running %i tests' % self.runner.total
-
- def Done(self):
- print
- for failed in self.runner.failed:
- output = self.runner.outputs[failed]
- self.PrintFailureHeader(failed)
- if output.stderr:
- print "--- stderr ---"
- print output.stderr.strip()
- if output.stdout:
- print "--- stdout ---"
- print output.stdout.strip()
- print "Command: %s" % failed.cmd.to_string()
- if output.HasCrashed():
- print "exit code: %d" % output.exit_code
- print "--- CRASHED ---"
- if output.HasTimedOut():
- print "--- TIMEOUT ---"
- if len(self.runner.failed) == 0:
- print "==="
- print "=== All tests succeeded"
- print "==="
- else:
- print
- print "==="
- print "=== %i tests failed" % len(self.runner.failed)
- if self.runner.crashed > 0:
- print "=== %i tests CRASHED" % self.runner.crashed
- print "==="
-
-
-class VerboseProgressIndicator(SimpleProgressIndicator):
-
- def HasRun(self, test, output, has_unexpected_output):
- if has_unexpected_output:
- if output.HasCrashed():
- outcome = 'CRASH'
- else:
- outcome = 'FAIL'
- else:
- outcome = 'pass'
- print 'Done running %s: %s' % (test, outcome)
- sys.stdout.flush()
-
- def Heartbeat(self):
- print 'Still working...'
- sys.stdout.flush()
-
- def ToProgressIndicatorProc(self):
- return progress_proc.VerboseProgressIndicator()
-
-
-class DotsProgressIndicator(SimpleProgressIndicator):
-
- def HasRun(self, test, output, has_unexpected_output):
- total = self.runner.succeeded + len(self.runner.failed)
- if (total > 1) and (total % 50 == 1):
- sys.stdout.write('\n')
- if has_unexpected_output:
- if output.HasCrashed():
- sys.stdout.write('C')
- sys.stdout.flush()
- elif output.HasTimedOut():
- sys.stdout.write('T')
- sys.stdout.flush()
- else:
- sys.stdout.write('F')
- sys.stdout.flush()
- else:
- sys.stdout.write('.')
- sys.stdout.flush()
-
- def ToProgressIndicatorProc(self):
- return progress_proc.DotsProgressIndicator()
-
-
-class CompactProgressIndicator(ProgressIndicator):
- """Abstract base class for {Color,Monochrome}ProgressIndicator"""
-
- def __init__(self, templates):
- super(CompactProgressIndicator, self).__init__()
- self.templates = templates
- self.last_status_length = 0
- self.start_time = time.time()
-
- def Done(self):
- self.PrintProgress('Done')
- print "" # Line break.
-
- def HasRun(self, test, output, has_unexpected_output):
- self.PrintProgress(str(test))
- if has_unexpected_output:
- self.ClearLine(self.last_status_length)
- self.PrintFailureHeader(test)
- stdout = output.stdout.strip()
- if len(stdout):
- print self.templates['stdout'] % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- print self.templates['stderr'] % stderr
- print "Command: %s" % test.cmd.to_string()
- if output.HasCrashed():
- print "exit code: %d" % output.exit_code
- print "--- CRASHED ---"
- if output.HasTimedOut():
- print "--- TIMEOUT ---"
-
- def Truncate(self, string, length):
- if length and (len(string) > (length - 3)):
- return string[:(length - 3)] + "..."
- else:
- return string
-
- def PrintProgress(self, name):
- self.ClearLine(self.last_status_length)
- elapsed = time.time() - self.start_time
- progress = 0 if not self.runner.total else (
- ((self.runner.total - self.runner.remaining) * 100) //
- self.runner.total)
- status = self.templates['status_line'] % {
- 'passed': self.runner.succeeded,
- 'progress': progress,
- 'failed': len(self.runner.failed),
- 'test': name,
- 'mins': int(elapsed) / 60,
- 'secs': int(elapsed) % 60
- }
- status = self.Truncate(status, 78)
- self.last_status_length = len(status)
- print status,
- sys.stdout.flush()
-
-
-class ColorProgressIndicator(CompactProgressIndicator):
-
- def __init__(self):
- templates = {
- 'status_line': ("[%(mins)02i:%(secs)02i|"
- "\033[34m%%%(progress) 4d\033[0m|"
- "\033[32m+%(passed) 4d\033[0m|"
- "\033[31m-%(failed) 4d\033[0m]: %(test)s"),
- 'stdout': "\033[1m%s\033[0m",
- 'stderr': "\033[31m%s\033[0m",
- }
- super(ColorProgressIndicator, self).__init__(templates)
-
- def ClearLine(self, last_line_length):
- print "\033[1K\r",
-
- def ToProgressIndicatorProc(self):
- return progress_proc.ColorProgressIndicator()
-
-
-class MonochromeProgressIndicator(CompactProgressIndicator):
-
- def __init__(self):
- templates = {
- 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
- "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
- 'stdout': '%s',
- 'stderr': '%s',
- }
- super(MonochromeProgressIndicator, self).__init__(templates)
-
- def ClearLine(self, last_line_length):
- print ("\r" + (" " * last_line_length) + "\r"),
-
- def ToProgressIndicatorProc(self):
- return progress_proc.MonochromeProgressIndicator()
-
-
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self.junitout = junitout
- self.juinttestsuite = junittestsuite
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def Done(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
- def HasRun(self, test, output, has_unexpected_output):
- fail_text = ""
- if has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % test.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=test.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def ToProgressIndicatorProc(self):
- if self.outfile != sys.stdout:
- self.outfile.close()
- return progress_proc.JUnitTestProgressIndicator(self.junitout,
- self.junittestsuite)
-
-
-class JsonTestProgressIndicator(ProgressIndicator):
-
- def __init__(self, json_test_results, arch, mode, random_seed):
- super(JsonTestProgressIndicator, self).__init__()
- self.json_test_results = json_test_results
- self.arch = arch
- self.mode = mode
- self.random_seed = random_seed
- self.results = []
- self.tests = []
-
- def ToProgressIndicatorProc(self):
- return progress_proc.JsonTestProgressIndicator(
- self.json_test_results, self.arch, self.mode, self.random_seed)
-
- def Done(self):
- complete_results = []
- if os.path.exists(self.json_test_results):
- with open(self.json_test_results, "r") as f:
- # Buildbot might start out with an empty file.
- complete_results = json.loads(f.read() or "[]")
-
- duration_mean = None
- if self.tests:
- # Get duration mean.
- duration_mean = (
- sum(duration for (_, duration) in self.tests) /
- float(len(self.tests)))
-
- # Sort tests by duration.
- self.tests.sort(key=lambda (_, duration): duration, reverse=True)
- slowest_tests = [
- {
- "name": str(test),
- "flags": test.cmd.args,
- "command": test.cmd.to_string(relative=True),
- "duration": duration,
- "marked_slow": test.is_slow,
- } for (test, duration) in self.tests[:20]
- ]
-
- complete_results.append({
- "arch": self.arch,
- "mode": self.mode,
- "results": self.results,
- "slowest_tests": slowest_tests,
- "duration_mean": duration_mean,
- "test_total": len(self.tests),
- })
-
- with open(self.json_test_results, "w") as f:
- f.write(json.dumps(complete_results))
-
- def HasRun(self, test, output, has_unexpected_output):
- # Buffer all tests for sorting the durations in the end.
- self.tests.append((test, output.duration))
- if not has_unexpected_output:
- # Omit tests that run as expected. Passing tests of reruns after failures
- # will have unexpected_output to be reported here has well.
- return
-
- self.results.append({
- "name": str(test),
- "flags": test.cmd.args,
- "command": test.cmd.to_string(relative=True),
- "run": test.run,
- "stdout": output.stdout,
- "stderr": output.stderr,
- "exit_code": output.exit_code,
- "result": test.output_proc.get_outcome(output),
- "expected": test.expected_outcomes,
- "duration": output.duration,
-
- # TODO(machenbach): This stores only the global random seed from the
- # context and not possible overrides when using random-seed stress.
- "random_seed": self.random_seed,
- "target_name": test.get_shell(),
- "variant": test.variant,
- })
-
-
-class FlakinessTestProgressIndicator(ProgressIndicator):
-
- def __init__(self, json_test_results):
- super(FlakinessTestProgressIndicator, self).__init__()
- self.json_test_results = json_test_results
- self.results = {}
- self.summary = {
- "PASS": 0,
- "FAIL": 0,
- "CRASH": 0,
- "TIMEOUT": 0,
- }
- self.seconds_since_epoch = time.time()
-
- def Done(self):
- with open(self.json_test_results, "w") as f:
- json.dump({
- "interrupted": False,
- "num_failures_by_type": self.summary,
- "path_delimiter": "/",
- "seconds_since_epoch": self.seconds_since_epoch,
- "tests": self.results,
- "version": 3,
- }, f)
-
- def HasRun(self, test, output, has_unexpected_output):
- key = test.get_id()
- outcome = test.output_proc.get_outcome(output)
- assert outcome in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
- if test.run == 1:
- # First run of this test.
- self.results[key] = {
- "actual": outcome,
- "expected": " ".join(test.expected_outcomes),
- "times": [output.duration],
- }
- self.summary[outcome] = self.summary[outcome] + 1
- else:
- # This is a rerun and a previous result exists.
- result = self.results[key]
- result["actual"] = "%s %s" % (result["actual"], outcome)
- result["times"].append(output.duration)
-
-
-PROGRESS_INDICATORS = {
- 'verbose': VerboseProgressIndicator,
- 'dots': DotsProgressIndicator,
- 'color': ColorProgressIndicator,
- 'mono': MonochromeProgressIndicator
-}
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 988750d6b4..e3adaa298a 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -34,8 +34,8 @@ from utils import Freeze
# Possible outcomes
FAIL = "FAIL"
PASS = "PASS"
-TIMEOUT = "TIMEOUT" # TODO(majeski): unused in status files
-CRASH = "CRASH" # TODO(majeski): unused in status files
+TIMEOUT = "TIMEOUT"
+CRASH = "CRASH"
# Outcomes only for status file, need special handling
FAIL_OK = "FAIL_OK"
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 6a9e9831ce..4bdfd008fe 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -34,32 +34,12 @@ from . import command
from . import statusfile
from . import utils
from ..objects.testcase import TestCase
-from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
+from .variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
STANDARD_VARIANT = set(["default"])
-class LegacyVariantsGenerator(object):
- def __init__(self, suite, variants):
- self.suite = suite
- self.all_variants = ALL_VARIANTS & variants
- self.standard_variant = STANDARD_VARIANT & variants
-
- def FilterVariantsByTest(self, test):
- if test.only_standard_variant:
- return self.standard_variant
- return self.all_variants
-
- def GetFlagSets(self, test, variant):
- return ALL_VARIANT_FLAGS[variant]
-
-
-class StandardLegacyVariantsGenerator(LegacyVariantsGenerator):
- def FilterVariantsByTest(self, testcase):
- return self.standard_variant
-
-
class VariantsGenerator(object):
def __init__(self, variants):
self._all_variants = [v for v in variants if v in ALL_VARIANTS]
@@ -80,45 +60,58 @@ class VariantsGenerator(object):
return self._all_variants
+class TestCombiner(object):
+ def get_group_key(self, test):
+ """To indicate what tests can be combined with each other we define a group
+ key for each test. Tests with the same group key can be combined. Test
+ without a group key (None) is not combinable with any other test.
+ """
+ raise NotImplementedError()
+
+ def combine(self, name, tests):
+ """Returns test combined from `tests`. Since we identify tests by their
+ suite and name, `name` parameter should be unique within one suite.
+ """
+ return self._combined_test_class()(name, tests)
+
+ def _combined_test_class(self):
+ raise NotImplementedError()
+
+
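For illustration, a minimal sketch of a suite-specific combiner satisfying the interface above; the class names and the grouping rule are hypothetical, not part of this patch:

class CombinedTest(object):
  # Hypothetical holder for a combined test; real suites derive from TestCase.
  def __init__(self, name, tests):
    self.name = name
    self.tests = tests

class DirectoryCombiner(TestCombiner):
  # Hypothetical rule: tests in the same directory may be merged.
  def get_group_key(self, test):
    if '/' not in test.name:
      return None  # not combinable with any other test
    return test.name.rsplit('/', 1)[0]

  def _combined_test_class(self):
    return CombinedTest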
class TestSuite(object):
@staticmethod
- def LoadTestSuite(root):
+ def LoadTestSuite(root, test_config):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module(name + "_testcfg", f, pathname, description)
- return module.GetSuite(name, root)
+ return module.GetSuite(name, root, test_config)
finally:
if f:
f.close()
- def __init__(self, name, root):
- # Note: This might be called concurrently from different processes.
+ def __init__(self, name, root, test_config):
self.name = name # string
self.root = root # string containing path
+ self.test_config = test_config
self.tests = None # list of TestCase objects
self.statusfile = None
+ self.suppress_internals = False
def status_file(self):
return "%s/%s.status" % (self.root, self.name)
- def ListTests(self, context):
- raise NotImplementedError
-
- def _LegacyVariantsGeneratorFactory(self):
- """The variant generator class to be used."""
- return LegacyVariantsGenerator
+ def do_suppress_internals(self):
+ """Specifies if this test suite should suppress asserts based on internals.
- def CreateLegacyVariantsGenerator(self, variants):
- """Return a generator for the testing variants of this suite.
-
- Args:
- variants: List of variant names to be run as specified by the test
- runner.
- Returns: An object of type LegacyVariantsGenerator.
+ Internals are, e.g., checks against the outcome of native runtime functions.
+ These checks are switched off for some fuzzers that violate the contracts.
"""
- return self._LegacyVariantsGeneratorFactory()(self, set(variants))
+ self.suppress_internals = True
+
+ def ListTests(self):
+ raise NotImplementedError
def get_variants_gen(self, variants):
return self._variants_gen_class()(variants)
@@ -126,11 +119,26 @@ class TestSuite(object):
def _variants_gen_class(self):
return VariantsGenerator
+ def test_combiner_available(self):
+ return bool(self._test_combiner_class())
+
+ def get_test_combiner(self):
+ cls = self._test_combiner_class()
+ if cls:
+ return cls()
+ return None
+
+ def _test_combiner_class(self):
+ """Returns Combiner subclass. None if suite doesn't support combining
+ tests.
+ """
+ return None
+
def ReadStatusFile(self, variables):
self.statusfile = statusfile.StatusFile(self.status_file(), variables)
- def ReadTestCases(self, context):
- self.tests = self.ListTests(context)
+ def ReadTestCases(self):
+ self.tests = self.ListTests()
def FilterTestCasesByStatus(self,
@@ -196,8 +204,19 @@ class TestSuite(object):
self.tests = filtered
def _create_test(self, path, **kwargs):
- test = self._test_class()(self, path, self._path_to_name(path), **kwargs)
- return test
+ if self.suppress_internals:
+ test_class = self._suppressed_test_class()
+ else:
+ test_class = self._test_class()
+ return test_class(self, path, self._path_to_name(path), self.test_config,
+ **kwargs)
+
+ def _suppressed_test_class(self):
+ """Optional testcase that suppresses assertions. Used by fuzzers that are
+ only interested in dchecks or tsan and that might violate the assertions
+ through fuzzing.
+ """
+ return self._test_class()
def _test_class(self):
raise NotImplementedError
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index bf8c3d9f7e..9834386d01 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -36,6 +36,21 @@ import subprocess
import urllib2
+### Exit codes and their meaning.
+# Normal execution.
+EXIT_CODE_PASS = 0
+# Execution with test failures.
+EXIT_CODE_FAILURES = 1
+# Execution with no tests executed.
+EXIT_CODE_NO_TESTS = 2
+# Execution aborted with SIGINT (Ctrl-C).
+EXIT_CODE_INTERRUPTED = 3
+# Execution aborted with SIGTERM.
+EXIT_CODE_TERMINATED = 4
+# Internal error.
+EXIT_CODE_INTERNAL_ERROR = 5
+
+
def GetSuitePaths(test_root):
return [ f for f in os.listdir(test_root) if isdir(join(test_root, f)) ]
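A sketch of how a wrapper script might consume these constants; the mapping helper below is hypothetical:

EXIT_CODE_MESSAGES = {
    EXIT_CODE_PASS: 'all tests passed',
    EXIT_CODE_FAILURES: 'some tests failed',
    EXIT_CODE_NO_TESTS: 'no tests were executed',
    EXIT_CODE_INTERRUPTED: 'aborted with SIGINT (Ctrl-C)',
    EXIT_CODE_TERMINATED: 'aborted with SIGTERM',
    EXIT_CODE_INTERNAL_ERROR: 'internal error',
}

def describe_exit_code(code):
  # Translate a runner exit code into a human-readable message.
  return EXIT_CODE_MESSAGES.get(code, 'unknown exit code %d' % code)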
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index f1e9ad301e..25de235da1 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -22,8 +22,31 @@ ALL_VARIANT_FLAGS = {
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
"trusted": [["--no-untrusted-code-mitigations"]],
- "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
+ "wasm_traps": [["--wasm-trap-handler", "--invoke-weak-callbacks"]],
"wasm_no_native": [["--no-wasm-jit-to-native"]],
}
-ALL_VARIANTS = set(ALL_VARIANT_FLAGS.keys())
+SLOW_VARIANTS = set([
+ 'stress',
+ 'nooptimization',
+])
+
+FAST_VARIANTS = set([
+ 'default'
+])
+
+
+def _variant_order_key(v):
+ if v in SLOW_VARIANTS:
+ return 0
+ if v in FAST_VARIANTS:
+ return 100
+ return 50
+
+ALL_VARIANTS = sorted(ALL_VARIANT_FLAGS.keys(),
+ key=_variant_order_key)
+
+# Check {SLOW,FAST}_VARIANTS entries
+for variants in [SLOW_VARIANTS, FAST_VARIANTS]:
+ for v in variants:
+ assert v in ALL_VARIANT_FLAGS
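Since the key function sorts ascending, slow variants (key 0) come first and 'default' (key 100) last; a quick self-contained check of the ordering:

# 'stress' is slow (0), 'trusted' is neither (50), 'default' is fast (100).
assert sorted(['default', 'stress', 'trusted'],
              key=_variant_order_key) == ['stress', 'trusted', 'default']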
diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py
new file mode 100755
index 0000000000..77effc1847
--- /dev/null
+++ b/deps/v8/tools/testrunner/num_fuzzer.py
@@ -0,0 +1,225 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import random
+import sys
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.
+import base_runner
+
+from testrunner.local import utils
+
+from testrunner.testproc import fuzzer
+from testrunner.testproc.base import TestProcProducer
+from testrunner.testproc.combiner import CombinerProc
+from testrunner.testproc.execution import ExecutionProc
+from testrunner.testproc.expectation import ForgiveTimeoutProc
+from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
+from testrunner.testproc.loader import LoadProc
+from testrunner.testproc.progress import ResultsTracker, TestsCounter
+from testrunner.utils import random_utils
+
+
+DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
+
+
+class NumFuzzer(base_runner.BaseTestRunner):
+ def __init__(self, *args, **kwargs):
+ super(NumFuzzer, self).__init__(*args, **kwargs)
+
+ def _add_parser_options(self, parser):
+ parser.add_option("--fuzzer-random-seed", default=0,
+ help="Default seed for initializing fuzzer random "
+ "generator")
+ parser.add_option("--tests-count", default=5, type="int",
+ help="Number of tests to generate from each base test. "
+ "Can be combined with --total-timeout-sec with "
+ "value 0 to provide infinite number of subtests. "
+ "When --combine-tests is set it indicates how many "
+ "tests to create in total")
+
+ # Stress gc
+ parser.add_option("--stress-marking", default=0, type="int",
+ help="probability [0-10] of adding --stress-marking "
+ "flag to the test")
+ parser.add_option("--stress-scavenge", default=0, type="int",
+ help="probability [0-10] of adding --stress-scavenge "
+ "flag to the test")
+ parser.add_option("--stress-compaction", default=0, type="int",
+ help="probability [0-10] of adding --stress-compaction "
+ "flag to the test")
+ parser.add_option("--stress-gc", default=0, type="int",
+ help="probability [0-10] of adding --random-gc-interval "
+ "flag to the test")
+ parser.add_option("--stress-thread-pool-size", default=0, type="int",
+ help="probability [0-10] of adding --thread-pool-size "
+ "flag to the test")
+
+ # Stress deopt
+ parser.add_option("--stress-deopt", default=0, type="int",
+ help="probability [0-10] of adding --deopt-every-n-times "
+ "flag to the test")
+ parser.add_option("--stress-deopt-min", default=1, type="int",
+ help="extends --stress-deopt to have minimum interval "
+ "between deopt points")
+
+ # Stress interrupt budget
+ parser.add_option("--stress-interrupt-budget", default=0, type="int",
+ help="probability [0-10] of adding --interrupt-budget "
+ "flag to the test")
+
+ # Combine multiple tests
+ parser.add_option("--combine-tests", default=False, action="store_true",
+ help="Combine multiple tests as one and run with "
+ "try-catch wrapper")
+ parser.add_option("--combine-max", default=100, type="int",
+ help="Maximum number of tests to combine")
+ parser.add_option("--combine-min", default=2, type="int",
+ help="Minimum number of tests to combine")
+
+ return parser
+
+
+ def _process_options(self, options):
+ if not options.fuzzer_random_seed:
+ options.fuzzer_random_seed = random_utils.random_seed()
+
+ if options.total_timeout_sec:
+ options.tests_count = 0
+
+ if options.combine_tests:
+ if options.combine_min > options.combine_max:
+ print ('--combine-min (%d) cannot be larger than --combine-max (%d)' %
+ (options.combine_min, options.combine_max))
+ raise base_runner.TestRunnerError()
+
+ return True
+
+ def _get_default_suite_names(self):
+ return DEFAULT_SUITES
+
+ def _timeout_scalefactor(self, options):
+ factor = super(NumFuzzer, self)._timeout_scalefactor(options)
+ if options.stress_interrupt_budget:
+ # TODO(machenbach): This should be moved to a more generic config.
+ # Fuzzers have too much timeout in debug mode.
+ factor = max(int(factor * 0.25), 1)
+ return factor
+
+ def _get_statusfile_variables(self, options):
+ variables = (
+ super(NumFuzzer, self)._get_statusfile_variables(options))
+ variables.update({
+ 'deopt_fuzzer': bool(options.stress_deopt),
+ 'endurance_fuzzer': bool(options.combine_tests),
+ 'gc_stress': bool(options.stress_gc),
+ 'gc_fuzzer': bool(max([options.stress_marking,
+ options.stress_scavenge,
+ options.stress_compaction,
+ options.stress_gc,
+ options.stress_thread_pool_size])),
+ })
+ return variables
+
+ def _do_execute(self, tests, args, options):
+ loader = LoadProc()
+ fuzzer_rng = random.Random(options.fuzzer_random_seed)
+
+ combiner = self._create_combiner(fuzzer_rng, options)
+ results = ResultsTracker()
+ execproc = ExecutionProc(options.j)
+ sigproc = self._create_signal_proc()
+ indicators = self._create_progress_indicators(options)
+ procs = [
+ loader,
+ NameFilterProc(args) if args else None,
+ StatusFileFilterProc(None, None),
+ # TODO(majeski): Improve sharding when combiner is present. Maybe select
+ # different random seeds for shards instead of splitting tests.
+ self._create_shard_proc(options),
+ ForgiveTimeoutProc(),
+ combiner,
+ self._create_fuzzer(fuzzer_rng, options),
+ sigproc,
+ ] + indicators + [
+ results,
+ self._create_timeout_proc(options),
+ self._create_rerun_proc(options),
+ execproc,
+ ]
+ self._prepare_procs(procs)
+ loader.load_tests(tests)
+
+ # TODO(majeski): maybe some notification from loader would be better?
+ if combiner:
+ combiner.generate_initial_tests(options.j * 4)
+
+ # This starts up worker processes and blocks until all tests are
+ # processed.
+ execproc.run()
+
+ for indicator in indicators:
+ indicator.finished()
+
+ print '>>> %d tests ran' % results.total
+ if results.failed:
+ return utils.EXIT_CODE_FAILURES
+
+ # Indicate if a SIGINT or SIGTERM happened.
+ return sigproc.exit_code
+
+ def _load_suites(self, names, options):
+ suites = super(NumFuzzer, self)._load_suites(names, options)
+ if options.combine_tests:
+ suites = [s for s in suites if s.test_combiner_available()]
+ if options.stress_interrupt_budget:
+ # Changing interrupt budget forces us to suppress certain test assertions.
+ for suite in suites:
+ suite.do_suppress_internals()
+ return suites
+
+ def _create_combiner(self, rng, options):
+ if not options.combine_tests:
+ return None
+ return CombinerProc(rng, options.combine_min, options.combine_max,
+ options.tests_count)
+
+ def _create_fuzzer(self, rng, options):
+ return fuzzer.FuzzerProc(
+ rng,
+ self._tests_count(options),
+ self._create_fuzzer_configs(options),
+ self._disable_analysis(options),
+ )
+
+ def _tests_count(self, options):
+ if options.combine_tests:
+ return 1
+ return options.tests_count
+
+ def _disable_analysis(self, options):
+ """Disable analysis phase when options are used that don't support it."""
+ return options.combine_tests or options.stress_interrupt_budget
+
+ def _create_fuzzer_configs(self, options):
+ fuzzers = []
+ def add(name, prob, *args):
+ if prob:
+ fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))
+
+ add('compaction', options.stress_compaction)
+ add('marking', options.stress_marking)
+ add('scavenge', options.stress_scavenge)
+ add('gc_interval', options.stress_gc)
+ add('threads', options.stress_thread_pool_size)
+ add('interrupt_budget', options.stress_interrupt_budget)
+ add('deopt', options.stress_deopt, options.stress_deopt_min)
+ return fuzzers
+
+
+if __name__ == '__main__':
+ sys.exit(NumFuzzer().execute())
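Each --stress-* option is a probability in tenths, so a value of 3 tags roughly 30% of the generated tests. An illustrative invocation (flag values chosen arbitrarily):

#   tools/testrunner/num_fuzzer.py --stress-compaction=3 \
#       --combine-tests --combine-min=2 --combine-max=10
# runs the default fuzz suites, adds --stress-compaction to ~30% of the
# generated tests, and combines between 2 and 10 base tests per case.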
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
deleted file mode 100644
index a3dd56d2dd..0000000000
--- a/deps/v8/tools/testrunner/objects/context.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class Context():
- def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
- isolates, command_prefix, extra_flags, noi18n, random_seed,
- no_sorting, rerun_failures_count, rerun_failures_max, no_harness,
- use_perf_data, sancov_dir, infra_staging=False):
- self.arch = arch
- self.mode = mode
- self.shell_dir = shell_dir
- self.mode_flags = mode_flags
- self.verbose = verbose
- self.timeout = timeout
- self.isolates = isolates
- self.command_prefix = command_prefix
- self.extra_flags = extra_flags
- self.noi18n = noi18n
- self.random_seed = random_seed
- self.no_sorting = no_sorting
- self.rerun_failures_count = rerun_failures_count
- self.rerun_failures_max = rerun_failures_max
- self.no_harness = no_harness
- self.use_perf_data = use_perf_data
- self.sancov_dir = sancov_dir
- self.infra_staging = infra_staging
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index adc33c9f12..74cec56a85 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -27,9 +27,11 @@
import signal
+import copy
from ..local import utils
+
class Output(object):
def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
@@ -40,6 +42,13 @@ class Output(object):
self.pid = pid
self.duration = duration
+ def without_text(self):
+ """Returns copy of the output without stdout and stderr."""
+ other = copy.copy(self)
+ other.stdout = None
+ other.stderr = None
+ return other
+
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.exit_code and not (0x3FFFFF00 & self.exit_code)
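Because without_text() relies on copy.copy, the clone shares the cheap scalar fields while only the potentially large text fields are dropped; a minimal sketch:

full = Output(exit_code=1, timed_out=False, stdout='x' * 10**6,
              stderr='', pid=1234, duration=0.5)
slim = full.without_text()
assert slim.exit_code == full.exit_code
assert slim.stdout is None and slim.stderr is None
assert full.stdout  # the original is left untouched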
diff --git a/deps/v8/tools/testrunner/objects/predictable.py b/deps/v8/tools/testrunner/objects/predictable.py
index ad93077be9..48279d625c 100644
--- a/deps/v8/tools/testrunner/objects/predictable.py
+++ b/deps/v8/tools/testrunner/objects/predictable.py
@@ -4,6 +4,7 @@
from ..local import statusfile
from ..outproc import base as outproc_base
+from ..testproc import base as testproc_base
from ..testproc.result import Result
@@ -15,11 +16,7 @@ from ..testproc.result import Result
def get_outproc(test):
- output_proc = test.output_proc
- if output_proc.negative or statusfile.FAIL in test.expected_outcomes:
- # TODO(majeski): Skip these tests instead of having special outproc.
- return NeverUnexpectedOutputOutProc(output_proc)
- return OutProc(output_proc)
+ return OutProc(test.output_proc)
class OutProc(outproc_base.BaseOutProc):
@@ -31,9 +28,6 @@ class OutProc(outproc_base.BaseOutProc):
super(OutProc, self).__init__()
self._outproc = _outproc
- def process(self, output):
- return Result(self.has_unexpected_output(output), output)
-
def has_unexpected_output(self, output):
return output.exit_code != 0
@@ -49,9 +43,7 @@ class OutProc(outproc_base.BaseOutProc):
return self._outproc.expected_outcomes
-class NeverUnexpectedOutputOutProc(OutProc):
- """Output processor wrapper for tests that we will return False for
- has_unexpected_output in the predictable mode.
- """
- def has_unexpected_output(self, output):
- return False
+class PredictableFilterProc(testproc_base.TestProcFilter):
+ def _filter(self, test):
+ return (statusfile.FAIL in test.expected_outcomes or
+ test.output_proc.negative)
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 06db32802c..775ddfeb07 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -40,7 +40,7 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
class TestCase(object):
- def __init__(self, suite, path, name):
+ def __init__(self, suite, path, name, test_config):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
@@ -49,60 +49,40 @@ class TestCase(object):
self.variant = None # name of the used testing variant
self.variant_flags = [] # list of strings, flags specific to this test
- self.id = None # int, used to map result back to TestCase instance
- self.run = 1 # The nth time this test is executed.
- self.cmd = None
-
# Fields used by the test processors.
self.origin = None # Test that this test is subtest of.
self.processor = None # Processor that created this subtest.
self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
self.keep_output = False # Can output of this test be dropped
+ # Test config contains information needed to build the command.
+ self._test_config = test_config
+ self._random_seed = None # Overrides test config value if not None
+
+ # Outcomes
self._statusfile_outcomes = None
- self._expected_outcomes = None # optimization: None == [statusfile.PASS]
+ self.expected_outcomes = None
self._statusfile_flags = None
+
self._prepare_outcomes()
def create_subtest(self, processor, subtest_id, variant=None, flags=None,
- keep_output=False):
+ keep_output=False, random_seed=None):
subtest = copy.copy(self)
subtest.origin = self
subtest.processor = processor
subtest.procid += '.%s' % subtest_id
- subtest.keep_output = keep_output
+ subtest.keep_output |= keep_output
+ if random_seed:
+ subtest._random_seed = random_seed
+ if flags:
+ subtest.variant_flags = subtest.variant_flags + flags
if variant is not None:
assert self.variant is None
subtest.variant = variant
- subtest.variant_flags = flags
subtest._prepare_outcomes()
return subtest
- def create_variant(self, variant, flags, procid_suffix=None):
- """Makes a shallow copy of the object and updates variant, variant flags and
- all fields that depend on it, e.g. expected outcomes.
-
- Args
- variant - variant name
- flags - flags that should be added to origin test's variant flags
- procid_suffix - for multiple variants with the same name set suffix to
- keep procid unique.
- """
- other = copy.copy(self)
- if not self.variant_flags:
- other.variant_flags = flags
- else:
- other.variant_flags = self.variant_flags + flags
- other.variant = variant
- if procid_suffix:
- other.procid += '[%s-%s]' % (variant, procid_suffix)
- else:
- other.procid += '[%s]' % variant
-
- other._prepare_outcomes(variant != self.variant)
-
- return other
-
def _prepare_outcomes(self, force_update=True):
if force_update or self._statusfile_outcomes is None:
def is_flag(outcome):
@@ -160,16 +140,17 @@ class TestCase(object):
def only_standard_variant(self):
return statusfile.NO_VARIANTS in self._statusfile_outcomes
- def get_command(self, context):
- params = self._get_cmd_params(context)
+ def get_command(self):
+ params = self._get_cmd_params()
env = self._get_cmd_env()
- shell, shell_flags = self._get_shell_with_flags(context)
- timeout = self._get_timeout(params, context.timeout)
- return self._create_cmd(shell, shell_flags + params, env, timeout, context)
+ shell, shell_flags = self._get_shell_with_flags()
+ timeout = self._get_timeout(params)
+ return self._create_cmd(shell, shell_flags + params, env, timeout)
- def _get_cmd_params(self, ctx):
+ def _get_cmd_params(self):
"""Gets command parameters and combines them in the following order:
- files [empty by default]
+ - random seed
- extra flags (from command line)
- user flags (variant/fuzzer flags)
- statusfile flags
@@ -180,23 +161,31 @@ class TestCase(object):
methods for getting partial parameters.
"""
return (
- self._get_files_params(ctx) +
- self._get_extra_flags(ctx) +
+ self._get_files_params() +
+ self._get_random_seed_flags() +
+ self._get_extra_flags() +
self._get_variant_flags() +
self._get_statusfile_flags() +
- self._get_mode_flags(ctx) +
+ self._get_mode_flags() +
self._get_source_flags() +
- self._get_suite_flags(ctx)
+ self._get_suite_flags()
)
def _get_cmd_env(self):
return {}
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return []
- def _get_extra_flags(self, ctx):
- return ctx.extra_flags
+ def _get_random_seed_flags(self):
+ return ['--random-seed=%d' % self.random_seed]
+
+ @property
+ def random_seed(self):
+ return self._random_seed or self._test_config.random_seed
+
+ def _get_extra_flags(self):
+ return self._test_config.extra_flags
def _get_variant_flags(self):
return self.variant_flags
@@ -208,27 +197,26 @@ class TestCase(object):
"""
return self._statusfile_flags
- def _get_mode_flags(self, ctx):
- return ctx.mode_flags
+ def _get_mode_flags(self):
+ return self._test_config.mode_flags
def _get_source_flags(self):
return []
- def _get_suite_flags(self, ctx):
+ def _get_suite_flags(self):
return []
- def _get_shell_with_flags(self, ctx):
+ def _get_shell_with_flags(self):
shell = self.get_shell()
shell_flags = []
if shell == 'd8':
shell_flags.append('--test')
if utils.IsWindows():
shell += '.exe'
- if ctx.random_seed:
- shell_flags.append('--random-seed=%s' % ctx.random_seed)
return shell, shell_flags
- def _get_timeout(self, params, timeout):
+ def _get_timeout(self, params):
+ timeout = self._test_config.timeout
if "--stress-opt" in params:
timeout *= 4
if "--noenable-vfp3" in params:
@@ -244,14 +232,14 @@ class TestCase(object):
def _get_suffix(self):
return '.js'
- def _create_cmd(self, shell, params, env, timeout, ctx):
+ def _create_cmd(self, shell, params, env, timeout):
return command.Command(
- cmd_prefix=ctx.command_prefix,
- shell=os.path.abspath(os.path.join(ctx.shell_dir, shell)),
+ cmd_prefix=self._test_config.command_prefix,
+ shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
args=params,
env=env,
timeout=timeout,
- verbose=ctx.verbose
+ verbose=self._test_config.verbose
)
def _parse_source_flags(self, source=None):
@@ -281,18 +269,9 @@ class TestCase(object):
# Make sure that test cases are sorted correctly if sorted without
# key function. But using a key function is preferred for speed.
return cmp(
- (self.suite.name, self.name, self.variant_flags),
- (other.suite.name, other.name, other.variant_flags)
+ (self.suite.name, self.name, self.variant),
+ (other.suite.name, other.name, other.variant)
)
- def __hash__(self):
- return hash((self.suite.name, self.name, ''.join(self.variant_flags)))
-
def __str__(self):
return self.suite.name + '/' + self.name
-
- # TODO(majeski): Rename `id` field or `get_id` function since they're
- # unrelated.
- def get_id(self):
- return '%s/%s %s' % (
- self.suite.name, self.name, ' '.join(self.variant_flags))
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
index 9a9db4e81d..d1953dda99 100644
--- a/deps/v8/tools/testrunner/outproc/base.py
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -2,24 +2,45 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import collections
import itertools
+from ..testproc.base import (
+ DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT)
from ..local import statusfile
from ..testproc.result import Result
OUTCOMES_PASS = [statusfile.PASS]
OUTCOMES_FAIL = [statusfile.FAIL]
+OUTCOMES_PASS_OR_TIMEOUT = [statusfile.PASS, statusfile.TIMEOUT]
+OUTCOMES_FAIL_OR_TIMEOUT = [statusfile.FAIL, statusfile.TIMEOUT]
class BaseOutProc(object):
- def process(self, output):
- return Result(self.has_unexpected_output(output), output)
+ def process(self, output, reduction=None):
+ has_unexpected_output = self.has_unexpected_output(output)
+ return self._create_result(has_unexpected_output, output, reduction)
def has_unexpected_output(self, output):
return self.get_outcome(output) not in self.expected_outcomes
+ def _create_result(self, has_unexpected_output, output, reduction):
+ """Creates Result instance. When reduction is passed it tries to drop some
+ parts of the result to save memory and time needed to send the result
+ across process boundary. None disables reduction and full result is created.
+ """
+ if reduction == DROP_RESULT:
+ return None
+ if reduction == DROP_OUTPUT:
+ return Result(has_unexpected_output, None)
+ if not has_unexpected_output:
+ if reduction == DROP_PASS_OUTPUT:
+ return Result(has_unexpected_output, None)
+ if reduction == DROP_PASS_STDOUT:
+ return Result(has_unexpected_output, output.without_text())
+
+ return Result(has_unexpected_output, output)
+
def get_outcome(self, output):
if output.HasCrashed():
return statusfile.CRASH
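The reduction levels trade result fidelity for the cost of shipping results across the process boundary, from most to least aggressive:

# DROP_RESULT      -> nothing is sent back at all (None)
# DROP_OUTPUT      -> only the pass/fail flag survives
# DROP_PASS_OUTPUT -> full output is kept only for unexpected results
# DROP_PASS_STDOUT -> passing results keep Output minus stdout/stderr
# e.g. (illustrative): result = outproc.process(output, DROP_PASS_STDOUT)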
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 3be2099252..d3d2bd53a6 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -5,47 +5,34 @@
# found in the LICENSE file.
-from collections import OrderedDict
-from os.path import join
-import multiprocessing
import os
-import random
-import shlex
-import subprocess
+import re
import sys
-import time
# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
from testrunner.local import utils
-from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
-from testrunner.objects import context
from testrunner.objects import predictable
from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
-from testrunner.testproc.progress import (VerboseProgressIndicator,
- ResultsTracker,
- TestsCounter)
-from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.progress import ResultsTracker, TestsCounter
+from testrunner.testproc.seed import SeedProc
from testrunner.testproc.variant import VariantProc
+from testrunner.utils import random_utils
-TIMEOUT_DEFAULT = 60
+ARCH_GUESS = utils.DefaultArch()
-# Variants ordered by expected runtime (slowest first).
VARIANTS = ["default"]
MORE_VARIANTS = [
- "stress",
- "stress_incremental_marking",
"nooptimization",
+ "stress",
"stress_background_compile",
+ "stress_incremental_marking",
"wasm_traps",
]
@@ -57,7 +44,7 @@ VARIANT_ALIASES = {
# Shortcut for the two above ("more" first - it has the longer running tests).
"exhaustive": MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
- "extra": ["future", "liftoff", "trusted"],
+ "extra": ["future", "liftoff", "trusted", "wasm_no_native"],
}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
@@ -65,15 +52,9 @@ GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation-delay=500",
"--concurrent-recompilation"]
-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
- "mips",
- "mipsel",
- "mips64",
- "mips64el",
- "s390",
- "s390x",
- "arm64"]
+RANDOM_GC_STRESS_FLAGS = ["--random-gc-interval=5000",
+ "--stress-compaction-random"]
+
PREDICTABLE_WRAPPER = os.path.join(
base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
@@ -84,72 +65,18 @@ class StandardTestRunner(base_runner.BaseTestRunner):
super(StandardTestRunner, self).__init__(*args, **kwargs)
self.sancov_dir = None
+ self._variants = None
def _get_default_suite_names(self):
return ['default']
- def _do_execute(self, suites, args, options):
- if options.swarming:
- # Swarming doesn't print how isolated commands are called. Lets make
- # this less cryptic by printing it ourselves.
- print ' '.join(sys.argv)
-
- if utils.GuessOS() == "macos":
- # TODO(machenbach): Temporary output for investigating hanging test
- # driver on mac.
- print "V8 related processes running on this host:"
- try:
- print subprocess.check_output(
- "ps -e | egrep 'd8|cctest|unittests'", shell=True)
- except Exception:
- pass
-
- return self._execute(args, options, suites)
-
def _add_parser_options(self, parser):
- parser.add_option("--sancov-dir",
- help="Directory where to collect coverage data")
- parser.add_option("--cfi-vptr",
- help="Run tests with UBSAN cfi_vptr option.",
- default=False, action="store_true")
parser.add_option("--novfp3",
help="Indicates that V8 was compiled without VFP3"
" support",
default=False, action="store_true")
- parser.add_option("--cat", help="Print the source of the tests",
- default=False, action="store_true")
- parser.add_option("--slow-tests",
- help="Regard slow tests (run|skip|dontcare)",
- default="dontcare")
- parser.add_option("--pass-fail-tests",
- help="Regard pass|fail tests (run|skip|dontcare)",
- default="dontcare")
- parser.add_option("--gc-stress",
- help="Switch on GC stress mode",
- default=False, action="store_true")
- parser.add_option("--command-prefix",
- help="Prepended to each shell command used to run a"
- " test",
- default="")
- parser.add_option("--extra-flags",
- help="Additional flags to pass to each test command",
- action="append", default=[])
- parser.add_option("--infra-staging", help="Use new test runner features",
- default=False, action="store_true")
- parser.add_option("--isolates", help="Whether to test isolates",
- default=False, action="store_true")
- parser.add_option("-j", help="The number of parallel tasks to run",
- default=0, type="int")
- parser.add_option("--no-harness", "--noharness",
- help="Run without test harness of a given suite",
- default=False, action="store_true")
- parser.add_option("--no-presubmit", "--nopresubmit",
- help='Skip presubmit checks (deprecated)',
- default=False, dest="no_presubmit", action="store_true")
- parser.add_option("--no-sorting", "--nosorting",
- help="Don't sort tests according to duration of last"
- " run.",
- default=False, dest="no_sorting", action="store_true")
+
+ # Variants
parser.add_option("--no-variants", "--novariants",
help="Deprecated. "
"Equivalent to passing --variants=default",
@@ -161,67 +88,80 @@ class StandardTestRunner(base_runner.BaseTestRunner):
default=False, action="store_true",
help="Deprecated. "
"Equivalent to passing --variants=exhaustive")
- parser.add_option("-p", "--progress",
- help=("The style of progress indicator"
- " (verbose, dots, color, mono)"),
- choices=progress.PROGRESS_INDICATORS.keys(),
- default="mono")
+
+ # Filters
+ parser.add_option("--slow-tests", default="dontcare",
+ help="Regard slow tests (run|skip|dontcare)")
+ parser.add_option("--pass-fail-tests", default="dontcare",
+ help="Regard pass|fail tests (run|skip|dontcare)")
parser.add_option("--quickcheck", default=False, action="store_true",
help=("Quick check mode (skip slow tests)"))
- parser.add_option("--report", help="Print a summary of the tests to be"
- " run",
- default=False, action="store_true")
- parser.add_option("--json-test-results",
- help="Path to a file for storing json results.")
- parser.add_option("--flakiness-results",
- help="Path to a file for storing flakiness json.")
- parser.add_option("--rerun-failures-count",
- help=("Number of times to rerun each failing test case."
- " Very slow tests will be rerun only once."),
- default=0, type="int")
- parser.add_option("--rerun-failures-max",
- help="Maximum number of failing test cases to rerun.",
- default=100, type="int")
parser.add_option("--dont-skip-slow-simulator-tests",
help="Don't skip more slow tests when using a"
" simulator.",
default=False, action="store_true",
dest="dont_skip_simulator_slow_tests")
- parser.add_option("--swarming",
- help="Indicates running test driver on swarming.",
+
+ # Stress modes
+ parser.add_option("--gc-stress",
+ help="Switch on GC stress mode",
+ default=False, action="store_true")
+ parser.add_option("--random-gc-stress",
+ help="Switch on random GC stress mode",
+ default=False, action="store_true")
+ parser.add_option("--random-seed-stress-count", default=1, type="int",
+ dest="random_seed_stress_count",
+ help="Number of runs with different random seeds. Only "
+ "with test processors: 0 means infinite "
+ "generation.")
+
+ # Noop
+ parser.add_option("--cfi-vptr",
+ help="Run tests with UBSAN cfi_vptr option.",
default=False, action="store_true")
+ parser.add_option("--infra-staging", help="Use new test runner features",
+ dest='infra_staging', default=None,
+ action="store_true")
+ parser.add_option("--no-infra-staging",
+ help="Opt out of new test runner features",
+ dest='infra_staging', default=None,
+ action="store_false")
+ parser.add_option("--no-sorting", "--nosorting",
+ help="Don't sort tests according to duration of last"
+ " run.",
+ default=False, dest="no_sorting", action="store_true")
+ parser.add_option("--no-presubmit", "--nopresubmit",
+ help='Skip presubmit checks (deprecated)',
+ default=False, dest="no_presubmit", action="store_true")
+
+ # Unimplemented for test processors
+ parser.add_option("--sancov-dir",
+ help="Directory where to collect coverage data")
+ parser.add_option("--cat", help="Print the source of the tests",
+ default=False, action="store_true")
+ parser.add_option("--flakiness-results",
+ help="Path to a file for storing flakiness json.")
parser.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
- parser.add_option("-t", "--timeout", help="Timeout in seconds",
- default=TIMEOUT_DEFAULT, type="int")
parser.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite",
- help="The testsuite name in the JUnit output file",
- default="v8tests")
- parser.add_option("--random-seed", default=0, dest="random_seed",
- help="Default seed for initializing random generator",
- type=int)
- parser.add_option("--random-seed-stress-count", default=1, type="int",
- dest="random_seed_stress_count",
- help="Number of runs with different random seeds")
+ parser.add_option("--report", default=False, action="store_true",
+ help="Print a summary of the tests to be run")
- def _process_options(self, options):
- global VARIANTS
+ def _process_options(self, options):
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
if not os.path.exists(self.sancov_dir):
print("sancov-dir %s doesn't exist" % self.sancov_dir)
raise base_runner.TestRunnerError()
- options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
-
if options.gc_stress:
options.extra_flags += GC_STRESS_FLAGS
+ if options.random_gc_stress:
+ options.extra_flags += RANDOM_GC_STRESS_FLAGS
+
if self.build_config.asan:
options.extra_flags.append("--invoke-weak-callbacks")
options.extra_flags.append("--omit-quit")
@@ -265,31 +205,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if self.build_config.msan:
options.variants = "default"
- if options.j == 0:
- options.j = multiprocessing.cpu_count()
-
- if options.random_seed_stress_count <= 1 and options.random_seed == 0:
- options.random_seed = self._random_seed()
-
- # Use developer defaults if no variant was specified.
- options.variants = options.variants or "dev"
-
if options.variants == "infra_staging":
options.variants = "exhaustive"
- options.infra_staging = True
- # Resolve variant aliases and dedupe.
- # TODO(machenbach): Don't mutate global variable. Rather pass mutated
- # version as local variable.
- VARIANTS = list(set(reduce(
- list.__add__,
- (VARIANT_ALIASES.get(v, [v]) for v in options.variants.split(",")),
- [],
- )))
-
- if not set(VARIANTS).issubset(ALL_VARIANTS):
- print "All variants must be in %s" % str(ALL_VARIANTS)
- raise base_runner.TestRunnerError()
+ self._variants = self._parse_variants(options.variants)
def CheckTestMode(name, option): # pragma: no cover
if not option in ["run", "skip", "dontcare"]:
@@ -303,6 +222,23 @@ class StandardTestRunner(base_runner.BaseTestRunner):
# TODO(machenbach): uncomment after infra side lands.
# base_runner.TEST_MAP["d8_default"].remove("intl")
+ def _parse_variants(self, aliases_str):
+ # Use developer defaults if no variant was specified.
+ aliases_str = aliases_str or 'dev'
+ aliases = aliases_str.split(',')
+ user_variants = set(reduce(
+ list.__add__, [VARIANT_ALIASES.get(a, [a]) for a in aliases]))
+
+ result = [v for v in ALL_VARIANTS if v in user_variants]
+ if len(result) == len(user_variants):
+ return result
+
+ for v in user_variants:
+ if v not in ALL_VARIANTS:
+ print 'Unknown variant: %s' % v
+ raise base_runner.TestRunnerError()
+ assert False, 'Unreachable'
+
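The method returns the selected variants in ALL_VARIANTS order and fails loudly on unknown names; for example (illustrative):

# self._parse_variants('stress,default')
#   -> [v for v in ALL_VARIANTS if v in {'stress', 'default'}]
# self._parse_variants('bogus')
#   -> prints 'Unknown variant: bogus' and raises TestRunnerError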
def _setup_env(self):
super(StandardTestRunner, self)._setup_env()
@@ -316,228 +252,39 @@ class StandardTestRunner(base_runner.BaseTestRunner):
"allow_user_segv_handler=1",
])
- def _random_seed(self):
- seed = 0
- while not seed:
- seed = random.SystemRandom().randint(-2147483648, 2147483647)
- return seed
-
- def _execute(self, args, options, suites):
- print(">>> Running tests for %s.%s" % (self.build_config.arch,
- self.mode_name))
- # Populate context object.
-
- # Simulators are slow, therefore allow a longer timeout.
- if self.build_config.arch in SLOW_ARCHS:
- options.timeout *= 2
+ def _get_statusfile_variables(self, options):
+ variables = (
+ super(StandardTestRunner, self)._get_statusfile_variables(options))
- options.timeout *= self.mode_options.timeout_scalefactor
-
- if self.build_config.predictable:
- # Predictable mode is slower.
- options.timeout *= 2
-
- ctx = context.Context(self.build_config.arch,
- self.mode_options.execution_mode,
- self.outdir,
- self.mode_options.flags,
- options.verbose,
- options.timeout,
- options.isolates,
- options.command_prefix,
- options.extra_flags,
- self.build_config.no_i18n,
- options.random_seed,
- options.no_sorting,
- options.rerun_failures_count,
- options.rerun_failures_max,
- options.no_harness,
- use_perf_data=not options.swarming,
- sancov_dir=self.sancov_dir,
- infra_staging=options.infra_staging)
-
- # TODO(all): Combine "simulator" and "simulator_run".
- # TODO(machenbach): In GN we can derive simulator run from
- # target_arch != v8_target_arch in the dumped build config.
simulator_run = (
not options.dont_skip_simulator_slow_tests and
self.build_config.arch in [
'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
'ppc64', 's390', 's390x'] and
- bool(base_runner.ARCH_GUESS) and
- self.build_config.arch != base_runner.ARCH_GUESS)
- # Find available test suites and read test cases from them.
- variables = {
- "arch": self.build_config.arch,
- "asan": self.build_config.asan,
- "byteorder": sys.byteorder,
- "dcheck_always_on": self.build_config.dcheck_always_on,
- "deopt_fuzzer": False,
- "gc_fuzzer": False,
- "gc_stress": options.gc_stress,
- "gcov_coverage": self.build_config.gcov_coverage,
- "isolates": options.isolates,
- "mode": self.mode_options.status_mode,
- "msan": self.build_config.msan,
- "no_harness": options.no_harness,
- "no_i18n": self.build_config.no_i18n,
- "no_snap": self.build_config.no_snap,
- "novfp3": options.novfp3,
- "predictable": self.build_config.predictable,
- "simulator": utils.UseSimulator(self.build_config.arch),
- "simulator_run": simulator_run,
- "system": utils.GuessOS(),
- "tsan": self.build_config.tsan,
- "ubsan_vptr": self.build_config.ubsan_vptr,
- }
-
- progress_indicator = progress.IndicatorNotifier()
- progress_indicator.Register(
- progress.PROGRESS_INDICATORS[options.progress]())
- if options.junitout: # pragma: no cover
- progress_indicator.Register(progress.JUnitTestProgressIndicator(
- options.junitout, options.junittestsuite))
- if options.json_test_results:
- progress_indicator.Register(progress.JsonTestProgressIndicator(
- options.json_test_results,
- self.build_config.arch,
- self.mode_options.execution_mode,
- ctx.random_seed))
- if options.flakiness_results: # pragma: no cover
- progress_indicator.Register(progress.FlakinessTestProgressIndicator(
- options.flakiness_results))
-
- if options.infra_staging:
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
-
- return self._run_test_procs(suites, args, options, progress_indicator,
- ctx)
-
- all_tests = []
- num_tests = 0
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
- if len(args) > 0:
- s.FilterTestCasesByArgs(args)
- all_tests += s.tests
-
- # First filtering by status applying the generic rules (tests without
- # variants)
- if options.warn_unused:
- tests = [(t.name, t.variant) for t in s.tests]
- s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
- s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
-
- if options.cat:
- verbose.PrintTestSource(s.tests)
- continue
- variant_gen = s.CreateLegacyVariantsGenerator(VARIANTS)
- variant_tests = [ t.create_variant(v, flags)
- for t in s.tests
- for v in variant_gen.FilterVariantsByTest(t)
- for flags in variant_gen.GetFlagSets(t, v) ]
-
- if options.random_seed_stress_count > 1:
- # Duplicate test for random seed stress mode.
- def iter_seed_flags():
- for _ in range(0, options.random_seed_stress_count):
- # Use given random seed for all runs (set by default in
- # execution.py) or a new random seed if none is specified.
- if options.random_seed:
- yield []
- else:
- yield ["--random-seed=%d" % self._random_seed()]
- s.tests = [
- t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
- for t in variant_tests
- for n, flags in enumerate(iter_seed_flags())
- ]
- else:
- s.tests = variant_tests
-
- # Second filtering by status applying also the variant-dependent rules.
- if options.warn_unused:
- tests = [(t.name, t.variant) for t in s.tests]
- s.statusfile.warn_unused_rules(tests, check_variant_rules=True)
-
- s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
- s.tests = self._shard_tests(s.tests, options)
-
- for t in s.tests:
- t.cmd = t.get_command(ctx)
-
- num_tests += len(s.tests)
-
- if options.cat:
- return 0 # We're done here.
-
- if options.report:
- verbose.PrintReport(all_tests)
-
- # Run the tests.
- start_time = time.time()
-
- if self.build_config.predictable:
- outproc_factory = predictable.get_outproc
- else:
- outproc_factory = None
-
- runner = execution.Runner(suites, progress_indicator, ctx,
- outproc_factory)
- exit_code = runner.Run(options.j)
- overall_duration = time.time() - start_time
+ bool(ARCH_GUESS) and
+ self.build_config.arch != ARCH_GUESS)
- if options.time:
- verbose.PrintTestDurations(suites, runner.outputs, overall_duration)
+ variables.update({
+ 'gc_stress': options.gc_stress or options.random_gc_stress,
+ 'novfp3': options.novfp3,
+ 'simulator_run': simulator_run,
+ })
+ return variables
- if num_tests == 0:
- print("Warning: no tests were run!")
-
- if exit_code == 1 and options.json_test_results:
- print("Force exit code 0 after failures. Json test results file "
- "generated with failure information.")
- exit_code = 0
-
- if self.sancov_dir:
- # If tests ran with sanitizer coverage, merge coverage files in the end.
- try:
- print "Merging sancov files."
- subprocess.check_call([
- sys.executable,
- join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
- "--coverage-dir=%s" % self.sancov_dir])
- except:
- print >> sys.stderr, "Error: Merging sancov files failed."
- exit_code = 1
-
- return exit_code
-
- def _shard_tests(self, tests, options):
- shard_run, shard_count = self._get_shard_info(options)
-
- if shard_count < 2:
- return tests
- count = 0
- shard = []
- for test in tests:
- if count % shard_count == shard_run - 1:
- shard.append(test)
- count += 1
- return shard
-
- def _run_test_procs(self, suites, args, options, progress_indicator,
- context):
+ def _do_execute(self, tests, args, options):
jobs = options.j
print '>>> Running with test processors'
loader = LoadProc()
tests_counter = TestsCounter()
results = ResultsTracker()
- indicators = progress_indicator.ToProgressIndicatorProcs()
- execproc = ExecutionProc(jobs, context)
+ indicators = self._create_progress_indicators(options)
+
+ outproc_factory = None
+ if self.build_config.predictable:
+ outproc_factory = predictable.get_outproc
+ execproc = ExecutionProc(jobs, outproc_factory)
+ sigproc = self._create_signal_proc()
procs = [
loader,
@@ -545,54 +292,60 @@ class StandardTestRunner(base_runner.BaseTestRunner):
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
self._create_shard_proc(options),
tests_counter,
- VariantProc(VARIANTS),
+ VariantProc(self._variants),
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+ self._create_predictable_filter(),
+ self._create_seed_proc(options),
+ sigproc,
] + indicators + [
results,
- self._create_rerun_proc(context),
+ self._create_timeout_proc(options),
+ self._create_rerun_proc(options),
execproc,
]
- procs = filter(None, procs)
-
- for i in xrange(0, len(procs) - 1):
- procs[i].connect_to(procs[i + 1])
-
- tests = [t for s in suites for t in s.tests]
+ self._prepare_procs(procs)
tests.sort(key=lambda t: t.is_slow, reverse=True)
- loader.setup()
loader.load_tests(tests)
print '>>> Running %d base tests' % tests_counter.total
tests_counter.remove_from_chain()
- execproc.start()
+ # This starts up worker processes and blocks until all tests are
+ # processed.
+ execproc.run()
for indicator in indicators:
indicator.finished()
- print '>>> %d tests ran' % results.total
+ print '>>> %d tests ran' % (results.total - results.remaining)
- exit_code = 0
+ exit_code = utils.EXIT_CODE_PASS
if results.failed:
- exit_code = 1
- if results.remaining:
- exit_code = 2
+ exit_code = utils.EXIT_CODE_FAILURES
+ if not results.total:
+ exit_code = utils.EXIT_CODE_NO_TESTS
+ # Indicate if a SIGINT or SIGTERM happened.
+ exit_code = max(exit_code, sigproc.exit_code)
- if exit_code == 1 and options.json_test_results:
+ if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
print("Force exit code 0 after failures. Json test results file "
"generated with failure information.")
- exit_code = 0
+ exit_code = utils.EXIT_CODE_PASS
return exit_code
- def _create_rerun_proc(self, ctx):
- if not ctx.rerun_failures_count:
+ def _create_predictable_filter(self):
+ if not self.build_config.predictable:
return None
- return RerunProc(ctx.rerun_failures_count,
- ctx.rerun_failures_max)
+ return predictable.PredictableFilterProc()
+ def _create_seed_proc(self, options):
+ if options.random_seed_stress_count == 1:
+ return None
+ return SeedProc(options.random_seed_stress_count, options.random_seed,
+ options.j * 4)
if __name__ == '__main__':
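For orientation, the test processors above form a doubly linked chain: _prepare_procs connects neighbours, tests flow towards ExecutionProc via next_test, and results flow back via result_for. A minimal sketch of that protocol (EchoProc is a hypothetical stand-in, not part of this patch):

# Minimal sketch of the processor chaining used above; EchoProc is
# hypothetical, connect_to/next_test/result_for mirror the real protocol.
class EchoProc(object):
  def __init__(self, name):
    self.name = name
    self._prev_proc = None
    self._next_proc = None

  def connect_to(self, next_proc):
    self._next_proc = next_proc
    next_proc._prev_proc = self

  def next_test(self, test):
    print('%s -> %s' % (self.name, test))  # tests travel forward
    if self._next_proc:
      self._next_proc.next_test(test)

  def result_for(self, test, result):
    if self._prev_proc:
      self._prev_proc.result_for(test, result)  # results travel back

procs = [EchoProc('loader'), EchoProc('variants'), EchoProc('execution')]
for i in range(len(procs) - 1):  # what _prepare_procs amounts to
  procs[i].connect_to(procs[i + 1])
procs[0].next_test('sweet/bananas')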
diff --git a/deps/v8/tools/testrunner/test_config.py b/deps/v8/tools/testrunner/test_config.py
new file mode 100644
index 0000000000..d9418fe9ac
--- /dev/null
+++ b/deps/v8/tools/testrunner/test_config.py
@@ -0,0 +1,32 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+from .utils import random_utils
+
+
+class TestConfig(object):
+ def __init__(self,
+ command_prefix,
+ extra_flags,
+ isolates,
+ mode_flags,
+ no_harness,
+ noi18n,
+ random_seed,
+ shell_dir,
+ timeout,
+ verbose):
+ self.command_prefix = command_prefix
+ self.extra_flags = extra_flags
+ self.isolates = isolates
+ self.mode_flags = mode_flags
+ self.no_harness = no_harness
+ self.noi18n = noi18n
+    # random_seed is never None.
+ self.random_seed = random_seed or random_utils.random_seed()
+ self.shell_dir = shell_dir
+ self.timeout = timeout
+ self.verbose = verbose
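One detail worth noting in the constructor above: the `random_seed or random_utils.random_seed()` fallback makes the stored seed always truthy. A hedged usage sketch, assuming the package layout introduced by this patch:

from testrunner.test_config import TestConfig  # path as added above

config = TestConfig(command_prefix=[], extra_flags=[], isolates=False,
                    mode_flags=[], no_harness=False, noi18n=False,
                    random_seed=None, shell_dir='out/Release', timeout=60,
                    verbose=False)
# Despite passing None, a non-zero seed was drawn from the OS entropy pool.
assert config.random_seed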
diff --git a/deps/v8/tools/testrunner/testproc/base.py b/deps/v8/tools/testrunner/testproc/base.py
index 1a87dbed55..5cb1182e89 100644
--- a/deps/v8/tools/testrunner/testproc/base.py
+++ b/deps/v8/tools/testrunner/testproc/base.py
@@ -37,36 +37,12 @@ DROP_OUTPUT = 1
DROP_PASS_OUTPUT = 2
DROP_PASS_STDOUT = 3
-def get_reduce_result_function(requirement):
- if requirement == DROP_RESULT:
- return lambda _: None
-
- if requirement == DROP_OUTPUT:
- def f(result):
- result.output = None
- return result
- return f
-
- if requirement == DROP_PASS_OUTPUT:
- def f(result):
- if not result.has_unexpected_output:
- result.output = None
- return result
- return f
-
- if requirement == DROP_PASS_STDOUT:
- def f(result):
- if not result.has_unexpected_output:
- result.output.stdout = None
- result.output.stderr = None
- return result
- return f
-
class TestProc(object):
def __init__(self):
self._prev_proc = None
self._next_proc = None
+ self._stopped = False
self._requirement = DROP_RESULT
self._prev_requirement = None
self._reduce_result = lambda result: result
@@ -90,8 +66,14 @@ class TestProc(object):
self._prev_requirement = requirement
if self._next_proc:
self._next_proc.setup(max(requirement, self._requirement))
- if self._prev_requirement < self._requirement:
- self._reduce_result = get_reduce_result_function(self._prev_requirement)
+
+  # Since we don't gain anything by dropping only part of the result, we
+  # either drop the whole result or pass it through as is. The real reduction
+  # happens during result creation (in the output processor), so the result
+  # is immutable.
+ if (self._prev_requirement < self._requirement and
+ self._prev_requirement == DROP_RESULT):
+ self._reduce_result = lambda _: None
def next_test(self, test):
"""
@@ -111,6 +93,18 @@ class TestProc(object):
if self._prev_proc:
self._prev_proc.heartbeat()
+ def stop(self):
+ if not self._stopped:
+ self._stopped = True
+ if self._prev_proc:
+ self._prev_proc.stop()
+ if self._next_proc:
+ self._next_proc.stop()
+
+ @property
+ def is_stopped(self):
+ return self._stopped
+
### Communication
def _send_test(self, test):
@@ -119,7 +113,8 @@ class TestProc(object):
def _send_result(self, test, result):
"""Helper method for sending result to the previous processor."""
- result = self._reduce_result(result)
+ if not test.keep_output:
+ result = self._reduce_result(result)
self._prev_proc.result_for(test, result)
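The setup() logic above replaces the old per-requirement reducer functions with a single binary decision: a result is dropped entirely only when the previous processor requires nothing and this processor requires more. A standalone sketch of that rule, using stand-in constants:

# Stand-ins mirroring the ordering DROP_RESULT < DROP_OUTPUT < ...
DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT = 0, 1, 2, 3

def make_reducer(prev_requirement, own_requirement):
  if prev_requirement < own_requirement and prev_requirement == DROP_RESULT:
    return lambda result: None      # nobody upstream needs the result
  return lambda result: result      # pass the (immutable) result through

assert make_reducer(DROP_RESULT, DROP_OUTPUT)('res') is None
assert make_reducer(DROP_OUTPUT, DROP_PASS_STDOUT)('res') == 'res'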
diff --git a/deps/v8/tools/testrunner/testproc/combiner.py b/deps/v8/tools/testrunner/testproc/combiner.py
new file mode 100644
index 0000000000..50944e1e5e
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/combiner.py
@@ -0,0 +1,124 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import defaultdict
+import time
+
+from . import base
+from ..objects import testcase
+from ..outproc import base as outproc
+
+
+class CombinerProc(base.TestProc):
+ def __init__(self, rng, min_group_size, max_group_size, count):
+ """
+ Args:
+ rng: random number generator
+ min_group_size: minimum number of tests to combine
+ max_group_size: maximum number of tests to combine
+ count: how many tests to generate. 0 means infinite running
+ """
+ super(CombinerProc, self).__init__()
+
+ self._rng = rng
+ self._min_size = min_group_size
+ self._max_size = max_group_size
+ self._count = count
+
+ # Index of the last generated test
+ self._current_num = 0
+
+ # {suite name: instance of TestGroups}
+ self._groups = defaultdict(TestGroups)
+
+ # {suite name: instance of TestCombiner}
+ self._combiners = {}
+
+ def setup(self, requirement=base.DROP_RESULT):
+ # Combiner is not able to pass results (even as None) to the previous
+ # processor.
+ assert requirement == base.DROP_RESULT
+ self._next_proc.setup(base.DROP_RESULT)
+
+ def next_test(self, test):
+ group_key = self._get_group_key(test)
+ if not group_key:
+ # Test not suitable for combining
+ return
+
+ self._groups[test.suite.name].add_test(group_key, test)
+
+ def _get_group_key(self, test):
+ combiner = self._get_combiner(test.suite)
+ if not combiner:
+ print ('>>> Warning: There is no combiner for %s testsuite' %
+ test.suite.name)
+ return None
+ return combiner.get_group_key(test)
+
+ def result_for(self, test, result):
+ self._send_next_test()
+
+ def generate_initial_tests(self, num=1):
+ for _ in xrange(0, num):
+ self._send_next_test()
+
+ def _send_next_test(self):
+ if self.is_stopped:
+ return
+
+ if self._count and self._current_num >= self._count:
+ return
+
+ combined_test = self._create_new_test()
+ if not combined_test:
+ # Not enough tests
+ return
+
+ self._send_test(combined_test)
+
+ def _create_new_test(self):
+ suite, combiner = self._select_suite()
+ groups = self._groups[suite]
+
+ max_size = self._rng.randint(self._min_size, self._max_size)
+ sample = groups.sample(self._rng, max_size)
+ if not sample:
+ return None
+
+ self._current_num += 1
+ return combiner.combine('%s-%d' % (suite, self._current_num), sample)
+
+ def _select_suite(self):
+ """Returns pair (suite name, combiner)."""
+ selected = self._rng.randint(0, len(self._groups) - 1)
+ for n, suite in enumerate(self._groups):
+ if n == selected:
+ return suite, self._combiners[suite]
+
+ def _get_combiner(self, suite):
+ combiner = self._combiners.get(suite.name)
+ if not combiner:
+ combiner = suite.get_test_combiner()
+ self._combiners[suite.name] = combiner
+ return combiner
+
+
+class TestGroups(object):
+ def __init__(self):
+ self._groups = defaultdict(list)
+ self._keys = []
+
+ def add_test(self, key, test):
+ self._groups[key].append(test)
+ self._keys.append(key)
+
+ def sample(self, rng, max_size):
+ # Not enough tests
+ if not self._groups:
+ return None
+
+ group_key = rng.choice(self._keys)
+ tests = self._groups[group_key]
+ return [rng.choice(tests) for _ in xrange(0, max_size)]
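Note that TestGroups.sample above draws with rng.choice, i.e. with replacement, so a combined test may contain the same base test more than once. A self-contained illustration with hypothetical group keys:

import random

groups = {'key-a': ['t1', 't2'], 'key-b': ['t3']}  # hypothetical data
keys = sorted(groups)

rng = random.Random(42)
group_key = rng.choice(keys)
# Sampling with replacement: duplicates are possible and intended.
sample = [rng.choice(groups[group_key]) for _ in range(4)]
print('%s: %s' % (group_key, sample))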
diff --git a/deps/v8/tools/testrunner/testproc/execution.py b/deps/v8/tools/testrunner/testproc/execution.py
index 021b02af3e..2d1ea02cd0 100644
--- a/deps/v8/tools/testrunner/testproc/execution.py
+++ b/deps/v8/tools/testrunner/testproc/execution.py
@@ -15,12 +15,12 @@ def run_job(job, process_context):
return job.run(process_context)
-def create_process_context(requirement):
- return ProcessContext(base.get_reduce_result_function(requirement))
+def create_process_context(result_reduction):
+ return ProcessContext(result_reduction)
JobResult = collections.namedtuple('JobResult', ['id', 'result'])
-ProcessContext = collections.namedtuple('ProcessContext', ['reduce_result_f'])
+ProcessContext = collections.namedtuple('ProcessContext', ['result_reduction'])
class Job(object):
@@ -32,9 +32,8 @@ class Job(object):
def run(self, process_ctx):
output = self.cmd.execute()
- result = self.outproc.process(output)
- if not self.keep_output:
- result = process_ctx.reduce_result_f(result)
+ reduction = process_ctx.result_reduction if not self.keep_output else None
+ result = self.outproc.process(output, reduction)
return JobResult(self.test_id, result)
@@ -44,49 +43,51 @@ class ExecutionProc(base.TestProc):
sends results to the previous processor.
"""
- def __init__(self, jobs, context):
+ def __init__(self, jobs, outproc_factory=None):
super(ExecutionProc, self).__init__()
self._pool = pool.Pool(jobs)
- self._context = context
+ self._outproc_factory = outproc_factory or (lambda t: t.output_proc)
self._tests = {}
def connect_to(self, next_proc):
assert False, 'ExecutionProc cannot be connected to anything'
- def start(self):
- try:
- it = self._pool.imap_unordered(
+ def run(self):
+ it = self._pool.imap_unordered(
fn=run_job,
gen=[],
process_context_fn=create_process_context,
process_context_args=[self._prev_requirement],
- )
- for pool_result in it:
- if pool_result.heartbeat:
- continue
-
- job_result = pool_result.value
- test_id, result = job_result
-
- test, result.cmd = self._tests[test_id]
- del self._tests[test_id]
- self._send_result(test, result)
- except KeyboardInterrupt:
- raise
- except:
- traceback.print_exc()
- raise
- finally:
- self._pool.terminate()
+ )
+ for pool_result in it:
+ self._unpack_result(pool_result)
def next_test(self, test):
+ if self.is_stopped:
+ return
+
test_id = test.procid
- cmd = test.get_command(self._context)
+ cmd = test.get_command()
self._tests[test_id] = test, cmd
- # TODO(majeski): Needs factory for outproc as in local/execution.py
- outproc = test.output_proc
+ outproc = self._outproc_factory(test)
self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
def result_for(self, test, result):
assert False, 'ExecutionProc cannot receive results'
+
+ def stop(self):
+ super(ExecutionProc, self).stop()
+ self._pool.abort()
+
+ def _unpack_result(self, pool_result):
+ if pool_result.heartbeat:
+ self.heartbeat()
+ return
+
+ job_result = pool_result.value
+ test_id, result = job_result
+
+ test, result.cmd = self._tests[test_id]
+ del self._tests[test_id]
+ self._send_result(test, result)
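The outproc_factory hook introduced above defaults to each test's own output processor; predictable builds substitute predictable.get_outproc instead. A sketch of the pattern (PredictableOutProc is a hypothetical stand-in, not the real class):

def default_factory(test):
  # Mirrors the `lambda t: t.output_proc` default above.
  return test.output_proc

def pick_outproc_factory(predictable_build):
  if predictable_build:
    # In this patch, predictable.get_outproc plays this role.
    return lambda test: PredictableOutProc(test)  # hypothetical class
  return default_factory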
diff --git a/deps/v8/tools/testrunner/testproc/expectation.py b/deps/v8/tools/testrunner/testproc/expectation.py
new file mode 100644
index 0000000000..607c010cf3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/expectation.py
@@ -0,0 +1,27 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+from testrunner.local import statusfile
+from testrunner.outproc import base as outproc
+
+class ForgiveTimeoutProc(base.TestProcProducer):
+ """Test processor passing tests and results through and forgiving timeouts."""
+ def __init__(self):
+ super(ForgiveTimeoutProc, self).__init__('no-timeout')
+
+ def _next_test(self, test):
+ subtest = self._create_subtest(test, 'no_timeout')
+ if subtest.expected_outcomes == outproc.OUTCOMES_PASS:
+ subtest.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+ elif subtest.expected_outcomes == outproc.OUTCOMES_FAIL:
+ subtest.expected_outcomes = outproc.OUTCOMES_FAIL_OR_TIMEOUT
+ elif statusfile.TIMEOUT not in subtest.expected_outcomes:
+ subtest.expected_outcomes = (
+ subtest.expected_outcomes + [statusfile.TIMEOUT])
+ self._send_test(subtest)
+
+ def _result_for(self, test, subtest, result):
+ self._send_result(test, result)
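The outcome adjustment in ForgiveTimeoutProc above widens each subtest's expectations to also accept TIMEOUT. A standalone sketch with stand-in constants for the statusfile/outproc values:

PASS, FAIL, TIMEOUT = 'PASS', 'FAIL', 'TIMEOUT'  # stand-ins
OUTCOMES_PASS = [PASS]
OUTCOMES_PASS_OR_TIMEOUT = [PASS, TIMEOUT]
OUTCOMES_FAIL = [FAIL]
OUTCOMES_FAIL_OR_TIMEOUT = [FAIL, TIMEOUT]

def forgive_timeout(expected):
  if expected == OUTCOMES_PASS:
    return OUTCOMES_PASS_OR_TIMEOUT
  if expected == OUTCOMES_FAIL:
    return OUTCOMES_FAIL_OR_TIMEOUT
  if TIMEOUT not in expected:
    return expected + [TIMEOUT]
  return expected

assert forgive_timeout([PASS]) == [PASS, TIMEOUT]
assert forgive_timeout([FAIL, TIMEOUT]) == [FAIL, TIMEOUT]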
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
new file mode 100644
index 0000000000..624b9aac04
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -0,0 +1,287 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import namedtuple
+import time
+
+from . import base
+
+
+class FuzzerConfig(object):
+ def __init__(self, probability, analyzer, fuzzer):
+ """
+ Args:
+      probability: probability of choosing this fuzzer, in the range (0, 10]
+ analyzer: instance of Analyzer class, can be None if no analysis is needed
+ fuzzer: instance of Fuzzer class
+ """
+ assert probability > 0 and probability <= 10
+
+ self.probability = probability
+ self.analyzer = analyzer
+ self.fuzzer = fuzzer
+
+
+class Analyzer(object):
+ def get_analysis_flags(self):
+ raise NotImplementedError()
+
+ def do_analysis(self, result):
+ raise NotImplementedError()
+
+
+class Fuzzer(object):
+ def create_flags_generator(self, rng, test, analysis_value):
+ """
+ Args:
+ rng: random number generator
+ test: test for which to create flags
+ analysis_value: value returned by the analyzer. None if there is no
+ corresponding analyzer to this fuzzer or the analysis phase is disabled
+ """
+ raise NotImplementedError()
+
+
+# TODO(majeski): Allow multiple subtests to run at once.
+class FuzzerProc(base.TestProcProducer):
+ def __init__(self, rng, count, fuzzers, disable_analysis=False):
+ """
+ Args:
+ rng: random number generator used to select flags and values for them
+ count: number of tests to generate based on each base test
+ fuzzers: list of FuzzerConfig instances
+      disable_analysis: disable the analysis phase and the filtering based on
+        it. When set, the processor passes None as the analysis result to
+        fuzzers
+ """
+ super(FuzzerProc, self).__init__('Fuzzer')
+
+ self._rng = rng
+ self._count = count
+ self._fuzzer_configs = fuzzers
+ self._disable_analysis = disable_analysis
+ self._gens = {}
+
+ def setup(self, requirement=base.DROP_RESULT):
+    # Fuzzer is optimized not to store results.
+ assert requirement == base.DROP_RESULT
+ super(FuzzerProc, self).setup(requirement)
+
+ def _next_test(self, test):
+ if self.is_stopped:
+ return
+
+ analysis_subtest = self._create_analysis_subtest(test)
+ if analysis_subtest:
+ self._send_test(analysis_subtest)
+ else:
+ self._gens[test.procid] = self._create_gen(test)
+ self._try_send_next_test(test)
+
+ def _create_analysis_subtest(self, test):
+ if self._disable_analysis:
+ return None
+
+ analysis_flags = []
+ for fuzzer_config in self._fuzzer_configs:
+ if fuzzer_config.analyzer:
+ analysis_flags += fuzzer_config.analyzer.get_analysis_flags()
+
+ if analysis_flags:
+ analysis_flags = list(set(analysis_flags))
+ return self._create_subtest(test, 'analysis', flags=analysis_flags,
+ keep_output=True)
+
+
+ def _result_for(self, test, subtest, result):
+ if not self._disable_analysis:
+ if result is not None:
+ # Analysis phase, for fuzzing we drop the result.
+ if result.has_unexpected_output:
+ self._send_result(test, None)
+ return
+ self._gens[test.procid] = self._create_gen(test, result)
+
+ self._try_send_next_test(test)
+
+ def _create_gen(self, test, analysis_result=None):
+    # This is called with analysis_result==None only when there is no
+    # analysis phase at all, i.e. no fuzzer has its own analyzer.
+ gens = []
+ indexes = []
+ for i, fuzzer_config in enumerate(self._fuzzer_configs):
+ analysis_value = None
+ if analysis_result and fuzzer_config.analyzer:
+ analysis_value = fuzzer_config.analyzer.do_analysis(analysis_result)
+ if not analysis_value:
+ # Skip fuzzer for this test since it doesn't have analysis data
+ continue
+ p = fuzzer_config.probability
+ flag_gen = fuzzer_config.fuzzer.create_flags_generator(self._rng, test,
+ analysis_value)
+ indexes += [len(gens)] * p
+ gens.append((p, flag_gen))
+
+ if not gens:
+ # No fuzzers for this test, skip it
+ return
+
+ i = 0
+ while not self._count or i < self._count:
+ main_index = self._rng.choice(indexes)
+ _, main_gen = gens[main_index]
+
+ flags = next(main_gen)
+ for index, (p, gen) in enumerate(gens):
+ if index == main_index:
+ continue
+ if self._rng.randint(1, 10) <= p:
+ flags += next(gen)
+
+ flags.append('--fuzzer-random-seed=%s' % self._next_seed())
+ yield self._create_subtest(test, str(i), flags=flags)
+
+ i += 1
+
+ def _try_send_next_test(self, test):
+ if not self.is_stopped:
+ for subtest in self._gens[test.procid]:
+ self._send_test(subtest)
+ return
+
+ del self._gens[test.procid]
+ self._send_result(test, None)
+
+ def _next_seed(self):
+ seed = None
+ while not seed:
+ seed = self._rng.randint(-2147483648, 2147483647)
+ return seed
+
+
+class ScavengeAnalyzer(Analyzer):
+ def get_analysis_flags(self):
+ return ['--fuzzer-gc-analysis']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('### Maximum new space size reached = '):
+ return int(float(line.split()[7]))
+
+
+class ScavengeFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stress-scavenge=%d' % (analysis_value or 100)]
+
+
+class MarkingAnalyzer(Analyzer):
+ def get_analysis_flags(self):
+ return ['--fuzzer-gc-analysis']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('### Maximum marking limit reached = '):
+ return int(float(line.split()[6]))
+
+
+class MarkingFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stress-marking=%d' % (analysis_value or 100)]
+
+
+class GcIntervalAnalyzer(Analyzer):
+ def get_analysis_flags(self):
+ return ['--fuzzer-gc-analysis']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('### Allocations = '):
+ return int(float(line.split()[3][:-1]))
+
+
+class GcIntervalFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ if analysis_value:
+ value = analysis_value / 10
+ else:
+ value = 10000
+ while True:
+ yield ['--random-gc-interval=%d' % value]
+
+
+class CompactionFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stress-compaction-random']
+
+
+class ThreadPoolSizeFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--thread-pool-size=%d' % rng.randint(1, 8)]
+
+
+class InterruptBudgetFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ limit = 1 + int(rng.random() * 144)
+ yield ['--interrupt-budget=%d' % rng.randint(1, limit * 1024)]
+
+
+class DeoptAnalyzer(Analyzer):
+ MAX_DEOPT=1000000000
+
+ def __init__(self, min_interval):
+ super(DeoptAnalyzer, self).__init__()
+ self._min = min_interval
+
+ def get_analysis_flags(self):
+ return ['--deopt-every-n-times=%d' % self.MAX_DEOPT,
+ '--print-deopt-stress']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('=== Stress deopt counter: '):
+ counter = self.MAX_DEOPT - int(line.split(' ')[-1])
+ if counter < self._min:
+          # Skip this test since we won't generate any meaningful interval
+          # with the given minimum.
+ return None
+ return counter
+
+
+class DeoptFuzzer(Fuzzer):
+ def __init__(self, min_interval):
+ super(DeoptFuzzer, self).__init__()
+ self._min = min_interval
+
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ if analysis_value:
+ value = analysis_value / 2
+ else:
+ value = 10000
+ interval = rng.randint(self._min, max(value, self._min))
+ yield ['--deopt-every-n-times=%d' % interval]
+
+
+FUZZERS = {
+ 'compaction': (None, CompactionFuzzer),
+ 'deopt': (DeoptAnalyzer, DeoptFuzzer),
+ 'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer),
+ 'interrupt_budget': (None, InterruptBudgetFuzzer),
+ 'marking': (MarkingAnalyzer, MarkingFuzzer),
+ 'scavenge': (ScavengeAnalyzer, ScavengeFuzzer),
+ 'threads': (None, ThreadPoolSizeFuzzer),
+}
+
+
+def create_fuzzer_config(name, probability, *args, **kwargs):
+ analyzer_class, fuzzer_class = FUZZERS[name]
+ return FuzzerConfig(
+ probability,
+ analyzer_class(*args, **kwargs) if analyzer_class else None,
+ fuzzer_class(*args, **kwargs),
+ )
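To make the probability semantics above concrete: each fuzzer contributes p copies of its index, so rng.choice selects it as the main fuzzer with probability p over the total weight, and every other fuzzer independently joins the flag set with probability p/10. A runnable sketch with hypothetical weights:

import random

# (name, p) pairs; the weights are made-up examples.
configs = [('scavenge', 4), ('marking', 4), ('threads', 2)]

indexes = []
for i, (_, p) in enumerate(configs):
  indexes += [i] * p  # p copies make rng.choice weighted

rng = random.Random(123)
main = rng.choice(indexes)
chosen = [configs[main][0]]
for i, (name, p) in enumerate(configs):
  if i != main and rng.randint(1, 10) <= p:  # join with probability p/10
    chosen.append(name)
print(chosen)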
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 78514f7252..221c64bfdd 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -103,6 +103,15 @@ class SimpleProgressIndicator(ProgressIndicator):
class VerboseProgressIndicator(SimpleProgressIndicator):
+ def __init__(self):
+ super(VerboseProgressIndicator, self).__init__()
+ self._last_printed_time = time.time()
+
+ def _print(self, text):
+ print text
+ sys.stdout.flush()
+ self._last_printed_time = time.time()
+
def _on_result_for(self, test, result):
super(VerboseProgressIndicator, self)._on_result_for(test, result)
# TODO(majeski): Support for dummy/grouped results
@@ -113,12 +122,13 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
outcome = 'FAIL'
else:
outcome = 'pass'
- print 'Done running %s: %s' % (test, outcome)
- sys.stdout.flush()
+ self._print('Done running %s: %s' % (test, outcome))
def _on_heartbeat(self):
- print 'Still working...'
- sys.stdout.flush()
+ if time.time() - self._last_printed_time > 30:
+ # Print something every 30 seconds to not get killed by an output
+ # timeout.
+ self._print('Still working...')
class DotsProgressIndicator(SimpleProgressIndicator):
@@ -292,7 +302,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator):
- def __init__(self, json_test_results, arch, mode, random_seed):
+ def __init__(self, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
# We want to drop stdout/err for all passed tests on the first try, but we
# need to get outputs for all runs after the first one. To accommodate that,
@@ -303,7 +313,6 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
- self.random_seed = random_seed
self.results = []
self.tests = []
@@ -338,10 +347,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"result": test.output_proc.get_outcome(output),
"expected": test.expected_outcomes,
"duration": output.duration,
-
- # TODO(machenbach): This stores only the global random seed from the
- # context and not possible overrides when using random-seed stress.
- "random_seed": self.random_seed,
+ "random_seed": test.random_seed,
"target_name": test.get_shell(),
"variant": test.variant,
})
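The 30-second guard added above rate-limits the "Still working..." message while every regular print resets the quiet window. A condensed model of that bookkeeping:

import time

class HeartbeatModel(object):
  # Condensed model of VerboseProgressIndicator's rate limiting above.
  def __init__(self):
    self._last_printed_time = time.time()

  def _print(self, text):
    print(text)
    self._last_printed_time = time.time()

  def on_heartbeat(self):
    # At most one keep-alive line per 30-second quiet window.
    if time.time() - self._last_printed_time > 30:
      self._print('Still working...')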
diff --git a/deps/v8/tools/testrunner/testproc/rerun.py b/deps/v8/tools/testrunner/testproc/rerun.py
index 7f96e0260c..a72bb3ebc6 100644
--- a/deps/v8/tools/testrunner/testproc/rerun.py
+++ b/deps/v8/tools/testrunner/testproc/rerun.py
@@ -34,7 +34,7 @@ class RerunProc(base.TestProcProducer):
results = self._results[test.procid]
results.append(result)
- if self._needs_rerun(test, result):
+ if not self.is_stopped and self._needs_rerun(test, result):
self._rerun[test.procid] += 1
if self._rerun_total_left is not None:
self._rerun_total_left -= 1
diff --git a/deps/v8/tools/testrunner/testproc/seed.py b/deps/v8/tools/testrunner/testproc/seed.py
new file mode 100644
index 0000000000..3f40e79b34
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/seed.py
@@ -0,0 +1,58 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+from collections import defaultdict
+
+from . import base
+from ..utils import random_utils
+
+
+class SeedProc(base.TestProcProducer):
+ def __init__(self, count, seed=None, parallel_subtests=1):
+ """
+ Args:
+ count: How many subtests with different seeds to create for each test.
+ 0 means infinite.
+ seed: seed to use. None means random seed for each subtest.
+      parallel_subtests: How many subtests of each test to run at the same time.
+ """
+ super(SeedProc, self).__init__('Seed')
+ self._count = count
+ self._seed = seed
+ self._last_idx = defaultdict(int)
+ self._todo = defaultdict(int)
+ self._parallel_subtests = parallel_subtests
+ if count:
+ self._parallel_subtests = min(self._parallel_subtests, count)
+
+ def setup(self, requirement=base.DROP_RESULT):
+ super(SeedProc, self).setup(requirement)
+
+ # SeedProc is optimized for dropping the result
+ assert requirement == base.DROP_RESULT
+
+ def _next_test(self, test):
+ for _ in xrange(0, self._parallel_subtests):
+ self._try_send_next_test(test)
+
+ def _result_for(self, test, subtest, result):
+ self._todo[test.procid] -= 1
+ self._try_send_next_test(test)
+
+ def _try_send_next_test(self, test):
+ def create_subtest(idx):
+ seed = self._seed or random_utils.random_seed()
+ return self._create_subtest(test, idx, random_seed=seed)
+
+ num = self._last_idx[test.procid]
+ if not self._count or num < self._count:
+ num += 1
+ self._send_test(create_subtest(num))
+ self._todo[test.procid] += 1
+ self._last_idx[test.procid] = num
+ elif not self._todo.get(test.procid):
+ del self._last_idx[test.procid]
+ del self._todo[test.procid]
+ self._send_result(test, None)
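SeedProc above keeps at most parallel_subtests subtests in flight per test and refills a slot on every result until count subtests have been sent. A back-of-the-envelope model of that bookkeeping, with made-up numbers:

count, parallel = 6, 2  # hypothetical values
sent = in_flight = 0

def on_slot_free():
  global sent, in_flight
  if sent < count:
    sent += 1
    in_flight += 1

for _ in range(parallel):   # _next_test primes parallel_subtests slots
  on_slot_free()
while in_flight:            # each _result_for frees a slot and refills it
  in_flight -= 1
  on_slot_free()

assert sent == count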
diff --git a/deps/v8/tools/testrunner/testproc/sigproc.py b/deps/v8/tools/testrunner/testproc/sigproc.py
new file mode 100644
index 0000000000..e97fe7ece3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/sigproc.py
@@ -0,0 +1,31 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import signal
+
+from . import base
+from testrunner.local import utils
+
+
+class SignalProc(base.TestProcObserver):
+ def __init__(self):
+ super(SignalProc, self).__init__()
+ self.exit_code = utils.EXIT_CODE_PASS
+
+ def setup(self, *args, **kwargs):
+ super(SignalProc, self).setup(*args, **kwargs)
+    # This must be called after the processors are chained together, so that
+    # a caught signal is not lost.
+ signal.signal(signal.SIGINT, self._on_ctrlc)
+ signal.signal(signal.SIGTERM, self._on_sigterm)
+
+ def _on_ctrlc(self, _signum, _stack_frame):
+ print '>>> Ctrl-C detected, early abort...'
+ self.exit_code = utils.EXIT_CODE_INTERRUPTED
+ self.stop()
+
+ def _on_sigterm(self, _signum, _stack_frame):
+ print '>>> SIGTERM received, early abort...'
+ self.exit_code = utils.EXIT_CODE_TERMINATED
+ self.stop()
diff --git a/deps/v8/tools/testrunner/testproc/timeout.py b/deps/v8/tools/testrunner/testproc/timeout.py
new file mode 100644
index 0000000000..84ddc656e2
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/timeout.py
@@ -0,0 +1,28 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+
+from . import base
+
+
+class TimeoutProc(base.TestProcObserver):
+ def __init__(self, duration_sec):
+ super(TimeoutProc, self).__init__()
+ self._duration_sec = duration_sec
+ self._start = time.time()
+
+ def _on_next_test(self, test):
+ self._on_event()
+
+ def _on_result_for(self, test, result):
+ self._on_event()
+
+ def _on_heartbeat(self):
+ self._on_event()
+
+ def _on_event(self):
+ if not self.is_stopped:
+ if time.time() - self._start > self._duration_sec:
+ self.stop()
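TimeoutProc above is a pure observer: any event (next test, result, heartbeat) arriving past the deadline stops the pipeline. A minimal standalone model:

import time

class DeadlineModel(object):
  # Minimal model of TimeoutProc's bookkeeping; stop() would normally
  # propagate through the whole processor chain.
  def __init__(self, duration_sec):
    self._start = time.time()
    self._duration_sec = duration_sec
    self.stopped = False

  def on_event(self):
    if not self.stopped and time.time() - self._start > self._duration_sec:
      self.stopped = True

model = DeadlineModel(duration_sec=0.0)
time.sleep(0.01)
model.on_event()
assert model.stopped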
diff --git a/deps/v8/tools/testrunner/trycatch_loader.js b/deps/v8/tools/testrunner/trycatch_loader.js
new file mode 100644
index 0000000000..737c8e45db
--- /dev/null
+++ b/deps/v8/tools/testrunner/trycatch_loader.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Wrapper for loading JavaScript tests passed as arguments, used by the gc
+// fuzzer. It ignores all exceptions and runs each test in a separate
+// namespace.
+//
+// It can't prevent the %AbortJS function from aborting execution, so it
+// should be used with d8's --disable-abortjs flag to ignore all possible
+// errors inside tests.
+
+// We use -- as an additional separator for test preamble files and test files.
+// The preamble files (before --) will be loaded in each realm before each
+// test.
+var separator = arguments.indexOf("--")
+var preamble = arguments.slice(0, separator)
+var tests = arguments.slice(separator + 1)
+
+var preambleString = ""
+for (let jstest of preamble) {
+ preambleString += "load(\"" + jstest + "\");"
+}
+
+for (let jstest of tests) {
+ print("Loading " + jstest);
+ let start = performance.now();
+
+  // Anonymous function to avoid polluting the global namespace.
+ (function () {
+ let realm = Realm.create();
+ try {
+ Realm.eval(realm, preambleString + "load(\"" + jstest + "\");");
+ } catch (err) {
+ // ignore all errors
+ }
+ Realm.dispose(realm);
+ })();
+
+ let durationSec = ((performance.now() - start) / 1000.0).toFixed(2);
+ print("Duration " + durationSec + "s");
+}
diff --git a/deps/v8/tools/testrunner/utils/__init__.py b/deps/v8/tools/testrunner/utils/__init__.py
new file mode 100644
index 0000000000..4433538556
--- /dev/null
+++ b/deps/v8/tools/testrunner/utils/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/utils/random_utils.py b/deps/v8/tools/testrunner/utils/random_utils.py
new file mode 100644
index 0000000000..0d2cb3fa95
--- /dev/null
+++ b/deps/v8/tools/testrunner/utils/random_utils.py
@@ -0,0 +1,13 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+
+def random_seed():
+ """Returns random, non-zero seed."""
+ seed = 0
+ while not seed:
+ seed = random.SystemRandom().randint(-2147483648, 2147483647)
+ return seed
diff --git a/deps/v8/tools/toolchain/BUILD.gn b/deps/v8/tools/toolchain/BUILD.gn
new file mode 100644
index 0000000000..b2462054c4
--- /dev/null
+++ b/deps/v8/tools/toolchain/BUILD.gn
@@ -0,0 +1,23 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/toolchain/gcc_toolchain.gni")
+
+gcc_toolchain("mips-bundled") {
+ toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+ root_build_dir)
+ cc = "${toolprefix}gcc"
+ cxx = "${toolprefix}g++"
+
+ readelf = "${toolprefix}readelf"
+ nm = "${toolprefix}nm"
+ ar = "${toolprefix}ar"
+ ld = cxx
+
+ toolchain_args = {
+ current_cpu = "mips"
+ current_os = "linux"
+ is_clang = false
+ }
+}
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index cad836b2e3..b77ccafa63 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -20,6 +20,12 @@ BOTS = {
'--nexus10': 'v8_nexus10_perf_try',
}
+# This list contains builder names that should be triggered on an internal
+# swarming bucket instead of the internal Buildbot master.
+SWARMING_BOTS = [
+ 'v8_linux64_perf_try',
+]
+
DEFAULT_BOTS = [
'v8_arm32_perf_try',
'v8_linux32_perf_try',
@@ -50,6 +56,17 @@ PUBLIC_BENCHMARKS = [
V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
+def _trigger_bots(bucket, bots, options):
+ cmd = ['git cl try']
+ cmd += ['-B', bucket]
+ cmd += ['-b %s' % bot for bot in bots]
+ if options.revision: cmd += ['-r %s' % options.revision]
+ benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
+ cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
+ if options.extra_flags:
+ cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
+ subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
+
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
@@ -89,14 +106,13 @@ def main():
subprocess.check_output(
'update_depot_tools', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
- cmd = ['git cl try -m internal.client.v8']
- cmd += ['-b %s' % bot for bot in options.bots]
- if options.revision: cmd += ['-r %s' % options.revision]
- benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
- cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
- if options.extra_flags:
- cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
- subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
+ buildbot_bots = [bot for bot in options.bots if bot not in SWARMING_BOTS]
+ if buildbot_bots:
+ _trigger_bots('master.internal.client.v8', buildbot_bots, options)
+
+ swarming_bots = [bot for bot in options.bots if bot in SWARMING_BOTS]
+ if swarming_bots:
+ _trigger_bots('luci.v8-internal.try', swarming_bots, options)
if __name__ == '__main__': # pragma: no cover
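The change above partitions options.bots by membership in SWARMING_BOTS, so each bot is triggered on exactly one bucket. A quick self-contained check of that split:

SWARMING_BOTS = ['v8_linux64_perf_try']
bots = ['v8_arm32_perf_try', 'v8_linux64_perf_try']

buildbot_bots = [b for b in bots if b not in SWARMING_BOTS]
swarming_bots = [b for b in bots if b in SWARMING_BOTS]

assert buildbot_bots == ['v8_arm32_perf_try']
assert swarming_bots == ['v8_linux64_perf_try']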
diff --git a/deps/v8/tools/turbolizer/index.html b/deps/v8/tools/turbolizer/index.html
index 552e83783a..2167d21a14 100644
--- a/deps/v8/tools/turbolizer/index.html
+++ b/deps/v8/tools/turbolizer/index.html
@@ -5,13 +5,13 @@
<link rel="stylesheet" href="turbo-visualizer.css" />
</head>
<body>
- <div id="left">
+ <div id="left" class="viewpane">
<div id='source-text'>
<pre id='source-text-pre'\>
</div>
</div>
<div class="resizer-left"></div>
- <div id="middle" class="resizable-pane">
+ <div id="middle" class="viewpane">
<div id="graph-toolbox-anchor">
<span id="graph-toolbox">
<input id="layout" type="image" title="layout graph" src="layout-icon.png"
@@ -55,7 +55,7 @@
</text></svg></div>
</div>
<div class="resizer-right"></div>
- <div id="right">
+ <div id="right" class="viewpane">
<div id='disassembly'>
<pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
<ul id='disassembly-list' class='nolinenums noindent'>
diff --git a/deps/v8/tools/turbolizer/monkey.js b/deps/v8/tools/turbolizer/monkey.js
index 129f8b3268..29eaaebb36 100644
--- a/deps/v8/tools/turbolizer/monkey.js
+++ b/deps/v8/tools/turbolizer/monkey.js
@@ -2,25 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-Array.prototype.getStaggeredFromMiddle = function(i) {
- if (i >= this.length) {
- throw("getStaggeredFromMiddle: OOB");
- }
- var middle = Math.floor(this.length / 2);
- var index = middle + (((i % 2) == 0) ? (i / 2) : (((1 - i) / 2) - 1));
- return this[index];
-}
-
-Array.prototype.contains = function(obj) {
- var i = this.length;
- while (i--) {
- if (this[i] === obj) {
- return true;
- }
- }
- return false;
-}
-
Math.alignUp = function(raw, multiple) {
return Math.floor((raw + multiple - 1) / multiple) * multiple;
}
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
index 7fd9c4852a..95fcba7928 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -242,15 +242,20 @@ span.linkable-text:hover {
#left {
- float: left; height: 100%; background-color: #FFFFFF;
+ float: left;
}
#middle {
- float:left; height: 100%; background-color: #F8F8F8;
+ float:left; background-color: #F8F8F8;
}
#right {
- float: right; background-color: #FFFFFF;
+ float: right;
+}
+
+.viewpane {
+ height: 100vh;
+ background-color: #FFFFFF;
}
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.js b/deps/v8/tools/turbolizer/turbo-visualizer.js
index c04384810b..0c720b22a4 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.js
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.js
@@ -65,18 +65,15 @@ class Snapper {
}
setDisassemblyExpanded(newState) {
- console.log(newState)
if (this.disassemblyExpand.classed("invisible") === newState) return;
this.disassemblyExpandUpdate(newState);
let resizer = this.resizer;
if (newState) {
resizer.sep_right = resizer.sep_right_snap;
resizer.sep_right_snap = resizer.client_width;
- console.log("set expand")
} else {
resizer.sep_right_snap = resizer.sep_right;
resizer.sep_right = resizer.client_width;
- console.log("set collapse")
}
resizer.updatePanes();
}
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index f4ff3fe1f7..4fb6aaff13 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -101,6 +101,8 @@ def run_tests(basedir, *args, **kwargs):
sys_args = ['--command-prefix', sys.executable] + list(args)
if kwargs.get('infra_staging', False):
sys_args.append('--infra-staging')
+ else:
+ sys_args.append('--no-infra-staging')
code = standard_runner.StandardTestRunner(
basedir=basedir).execute(sys_args)
return Result(stdout.getvalue(), stderr.getvalue(), code)
@@ -145,7 +147,9 @@ class SystemTest(unittest.TestCase):
sys.path.append(TOOLS_ROOT)
global standard_runner
from testrunner import standard_runner
+ from testrunner.local import command
from testrunner.local import pool
+ command.setup_testing()
pool.setup_testing()
@classmethod
@@ -170,10 +174,11 @@ class SystemTest(unittest.TestCase):
'sweet/bananas',
'sweet/raspberries',
)
- self.assertIn('Running 4 tests', result.stdout, result)
+ self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
- self.assertIn('Total time:', result.stderr, result)
- self.assertIn('sweet/bananas', result.stderr, result)
+ # TODO(majeski): Implement for test processors
+ # self.assertIn('Total time:', result.stderr, result)
+ # self.assertIn('sweet/bananas', result.stderr, result)
self.assertEqual(0, result.returncode, result)
def testShardedProc(self):
@@ -199,6 +204,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
+ @unittest.skip("incompatible with test processors")
def testSharded(self):
"""Test running a particular shard."""
with temp_base() as basedir:
@@ -222,7 +228,7 @@ class SystemTest(unittest.TestCase):
def testFailProc(self):
self.testFail(infra_staging=True)
- def testFail(self, infra_staging=False):
+ def testFail(self, infra_staging=True):
"""Test running only failing tests in two variants."""
with temp_base() as basedir:
result = run_tests(
@@ -269,7 +275,7 @@ class SystemTest(unittest.TestCase):
def testFailWithRerunAndJSONProc(self):
self.testFailWithRerunAndJSON(infra_staging=True)
- def testFailWithRerunAndJSON(self, infra_staging=False):
+ def testFailWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json."""
with temp_base() as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -303,12 +309,13 @@ class SystemTest(unittest.TestCase):
# flags field of the test result.
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
+ self.maxDiff = None
self.check_cleaned_json_output('expected_test_results1.json', json_path)
def testFlakeWithRerunAndJSONProc(self):
self.testFlakeWithRerunAndJSON(infra_staging=True)
- def testFlakeWithRerunAndJSON(self, infra_staging=False):
+ def testFlakeWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json."""
with temp_base(baseroot='testroot2') as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -334,6 +341,7 @@ class SystemTest(unittest.TestCase):
'Done running sweet/bananaflakes: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
+ self.maxDiff = None
self.check_cleaned_json_output('expected_test_results2.json', json_path)
def testAutoDetect(self):
@@ -374,7 +382,7 @@ class SystemTest(unittest.TestCase):
def testSkipsProc(self):
self.testSkips(infra_staging=True)
- def testSkips(self, infra_staging=False):
+ def testSkips(self, infra_staging=True):
"""Test skipping tests in status file for a specific variant."""
with temp_base() as basedir:
result = run_tests(
@@ -390,12 +398,12 @@ class SystemTest(unittest.TestCase):
else:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('0 tests ran', result.stdout, result)
- self.assertEqual(0, result.returncode, result)
+ self.assertEqual(2, result.returncode, result)
def testDefaultProc(self):
self.testDefault(infra_staging=True)
- def testDefault(self, infra_staging=False):
+ def testDefault(self, infra_staging=True):
"""Test using default test suites, though no tests are run since they don't
exist in a test setting.
"""
@@ -410,14 +418,14 @@ class SystemTest(unittest.TestCase):
else:
self.assertIn('Running 0 base tests', result.stdout, result)
self.assertIn('0 tests ran', result.stdout, result)
- self.assertEqual(0, result.returncode, result)
+ self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self):
"""Test failing run when build config is not found."""
with temp_base() as basedir:
result = run_tests(basedir)
self.assertIn('Failed to load build config', result.stdout, result)
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testGNOption(self):
"""Test using gn option, but no gn build folder is found."""
@@ -433,7 +441,7 @@ class SystemTest(unittest.TestCase):
result = run_tests(basedir, '--mode=Release')
self.assertIn('execution mode (release) for release is inconsistent '
'with build config (debug)', result.stdout, result)
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testInconsistentArch(self):
"""Test failing run when attempting to wrongly override the arch."""
@@ -442,13 +450,13 @@ class SystemTest(unittest.TestCase):
self.assertIn(
'--arch value (ia32) inconsistent with build config (x64).',
result.stdout, result)
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testWrongVariant(self):
"""Test using a bogus variant."""
with temp_base() as basedir:
result = run_tests(basedir, '--mode=Release', '--variants=meh')
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testModeFromBuildConfig(self):
"""Test auto-detection of mode from build config."""
@@ -457,6 +465,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('Running tests for x64.release', result.stdout, result)
self.assertEqual(0, result.returncode, result)
+ @unittest.skip("not available with test processors")
def testReport(self):
"""Test the report feature.
@@ -475,6 +484,7 @@ class SystemTest(unittest.TestCase):
result.stdout, result)
self.assertEqual(1, result.returncode, result)
+ @unittest.skip("not available with test processors")
def testWarnUnusedRules(self):
"""Test the unused-rules feature."""
with temp_base() as basedir:
@@ -489,6 +499,7 @@ class SystemTest(unittest.TestCase):
self.assertIn( 'Unused rule: regress/', result.stdout, result)
self.assertEqual(1, result.returncode, result)
+ @unittest.skip("not available with test processors")
def testCatNoSources(self):
"""Test printing sources, but the suite's tests have none available."""
with temp_base() as basedir:
@@ -506,7 +517,7 @@ class SystemTest(unittest.TestCase):
def testPredictableProc(self):
self.testPredictable(infra_staging=True)
- def testPredictable(self, infra_staging=False):
+ def testPredictable(self, infra_staging=True):
"""Test running a test in verify-predictable mode.
The test will fail because of missing allocation output. We verify that and
@@ -547,7 +558,10 @@ class SystemTest(unittest.TestCase):
# timeout was used.
self.assertEqual(0, result.returncode, result)
- def testRandomSeedStressWithDefault(self):
+ def testRandomSeedStressWithDefaultProc(self):
+ self.testRandomSeedStressWithDefault(infra_staging=True)
+
+ def testRandomSeedStressWithDefault(self, infra_staging=True):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
@@ -557,8 +571,13 @@ class SystemTest(unittest.TestCase):
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
+ infra_staging=infra_staging,
)
- self.assertIn('Running 2 tests', result.stdout, result)
+ if infra_staging:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
+ else:
+ self.assertIn('Running 2 tests', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self):
@@ -573,7 +592,8 @@ class SystemTest(unittest.TestCase):
'--random-seed=123',
'sweet/strawberries',
)
- self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
# We use a failing test so that the command is printed and we can verify
# that the right random seed was passed.
self.assertIn('--random-seed=123', result.stdout, result)
@@ -598,7 +618,8 @@ class SystemTest(unittest.TestCase):
)
# Both tests are either marked as running in only default or only
# slow variant.
- self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('Running 2 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testStatusFilePresubmit(self):
@@ -611,7 +632,7 @@ class SystemTest(unittest.TestCase):
def testDotsProgressProc(self):
self.testDotsProgress(infra_staging=True)
- def testDotsProgress(self, infra_staging=False):
+ def testDotsProgress(self, infra_staging=True):
with temp_base() as basedir:
result = run_tests(
basedir,
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
index 172b87a5d6..e889ecabce 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -4,15 +4,15 @@
"mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -20,20 +20,20 @@
"result": "FAIL",
"run": 1,
"stderr": "",
- "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -41,20 +41,20 @@
"result": "FAIL",
"run": 2,
"stderr": "",
- "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -62,40 +62,40 @@
"result": "FAIL",
"run": 3,
"stderr": "",
- "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
}
],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
index 7fcfe47f71..cdb4766e95 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results2.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -4,15 +4,15 @@
"mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/bananaflakes",
@@ -20,20 +20,20 @@
"result": "FAIL",
"run": 1,
"stderr": "",
- "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 0,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/bananaflakes",
@@ -41,29 +41,29 @@
"result": "PASS",
"run": 2,
"stderr": "",
- "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
}
],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": false,
"name": "sweet/bananaflakes"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
index 115471ac72..1fcf2864b6 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
@@ -10,7 +10,7 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
return map(
self._create_test,
['bananas', 'apples', 'cherries', 'strawberries', 'raspberries'],
@@ -24,8 +24,8 @@ class TestCase(testcase.TestCase):
def get_shell(self):
return 'd8_mocked.py'
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return [self.name]
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
index 9407769b35..a986af5c2f 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
+++ b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
@@ -10,7 +10,7 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
return map(
self._create_test,
['bananaflakes'],
@@ -24,8 +24,8 @@ class TestCase(testcase.TestCase):
def get_shell(self):
return 'd8_mocked.py'
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return [self.name]
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index c96741a9a1..5659cdd03c 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -49,44 +49,53 @@ INSTANCE_TYPES = {
145: "FIXED_FLOAT32_ARRAY_TYPE",
146: "FIXED_FLOAT64_ARRAY_TYPE",
147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 148: "FIXED_DOUBLE_ARRAY_TYPE",
- 149: "FILLER_TYPE",
- 150: "ACCESS_CHECK_INFO_TYPE",
- 151: "ACCESSOR_INFO_TYPE",
- 152: "ACCESSOR_PAIR_TYPE",
- 153: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 154: "ALLOCATION_MEMENTO_TYPE",
- 155: "ALLOCATION_SITE_TYPE",
- 156: "ASYNC_GENERATOR_REQUEST_TYPE",
- 157: "CONTEXT_EXTENSION_TYPE",
- 158: "DEBUG_INFO_TYPE",
- 159: "FUNCTION_TEMPLATE_INFO_TYPE",
- 160: "INTERCEPTOR_INFO_TYPE",
- 161: "MODULE_INFO_ENTRY_TYPE",
- 162: "MODULE_TYPE",
- 163: "OBJECT_TEMPLATE_INFO_TYPE",
- 164: "PROMISE_REACTION_JOB_INFO_TYPE",
- 165: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
- 166: "PROTOTYPE_INFO_TYPE",
- 167: "SCRIPT_TYPE",
- 168: "STACK_FRAME_INFO_TYPE",
- 169: "TUPLE2_TYPE",
- 170: "TUPLE3_TYPE",
- 171: "FIXED_ARRAY_TYPE",
- 172: "DESCRIPTOR_ARRAY_TYPE",
- 173: "HASH_TABLE_TYPE",
- 174: "TRANSITION_ARRAY_TYPE",
- 175: "CELL_TYPE",
- 176: "CODE_DATA_CONTAINER_TYPE",
- 177: "FEEDBACK_VECTOR_TYPE",
- 178: "LOAD_HANDLER_TYPE",
- 179: "PROPERTY_ARRAY_TYPE",
- 180: "PROPERTY_CELL_TYPE",
- 181: "SHARED_FUNCTION_INFO_TYPE",
- 182: "SMALL_ORDERED_HASH_MAP_TYPE",
- 183: "SMALL_ORDERED_HASH_SET_TYPE",
- 184: "STORE_HANDLER_TYPE",
- 185: "WEAK_CELL_TYPE",
+ 148: "FIXED_BIGINT64_ARRAY_TYPE",
+ 149: "FIXED_BIGUINT64_ARRAY_TYPE",
+ 150: "FIXED_DOUBLE_ARRAY_TYPE",
+ 151: "FILLER_TYPE",
+ 152: "ACCESS_CHECK_INFO_TYPE",
+ 153: "ACCESSOR_INFO_TYPE",
+ 154: "ACCESSOR_PAIR_TYPE",
+ 155: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 156: "ALLOCATION_MEMENTO_TYPE",
+ 157: "ALLOCATION_SITE_TYPE",
+ 158: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 159: "CONTEXT_EXTENSION_TYPE",
+ 160: "DEBUG_INFO_TYPE",
+ 161: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 162: "INTERCEPTOR_INFO_TYPE",
+ 163: "MODULE_INFO_ENTRY_TYPE",
+ 164: "MODULE_TYPE",
+ 165: "OBJECT_TEMPLATE_INFO_TYPE",
+ 166: "PROMISE_CAPABILITY_TYPE",
+ 167: "PROMISE_REACTION_TYPE",
+ 168: "PROTOTYPE_INFO_TYPE",
+ 169: "SCRIPT_TYPE",
+ 170: "STACK_FRAME_INFO_TYPE",
+ 171: "TUPLE2_TYPE",
+ 172: "TUPLE3_TYPE",
+ 173: "CALLABLE_TASK_TYPE",
+ 174: "CALLBACK_TASK_TYPE",
+ 175: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+ 176: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+ 177: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+ 178: "FIXED_ARRAY_TYPE",
+ 179: "DESCRIPTOR_ARRAY_TYPE",
+ 180: "HASH_TABLE_TYPE",
+ 181: "SCOPE_INFO_TYPE",
+ 182: "TRANSITION_ARRAY_TYPE",
+ 183: "CELL_TYPE",
+ 184: "CODE_DATA_CONTAINER_TYPE",
+ 185: "FEEDBACK_CELL_TYPE",
+ 186: "FEEDBACK_VECTOR_TYPE",
+ 187: "LOAD_HANDLER_TYPE",
+ 188: "PROPERTY_ARRAY_TYPE",
+ 189: "PROPERTY_CELL_TYPE",
+ 190: "SHARED_FUNCTION_INFO_TYPE",
+ 191: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 192: "SMALL_ORDERED_HASH_SET_TYPE",
+ 193: "STORE_HANDLER_TYPE",
+ 194: "WEAK_CELL_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
@@ -131,35 +140,39 @@ INSTANCE_TYPES = {
1091: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
1092: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
1093: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1094: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1095: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1096: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1097: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1098: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1099: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1100: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1101: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
- 1102: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
- 1103: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
- 1104: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
- 1105: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
- 1106: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
- 1107: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
- 1108: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
- 1109: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
- 1110: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 1111: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 1112: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
- 1113: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
- 1114: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 1115: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 1116: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
- 1117: "WASM_INSTANCE_TYPE",
- 1118: "WASM_MEMORY_TYPE",
- 1119: "WASM_MODULE_TYPE",
- 1120: "WASM_TABLE_TYPE",
- 1121: "JS_BOUND_FUNCTION_TYPE",
- 1122: "JS_FUNCTION_TYPE",
+ 1094: "JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1095: "JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1096: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1097: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1098: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1099: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1100: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1101: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1102: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1103: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 1104: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 1105: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 1106: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 1107: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1108: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1109: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1110: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 1111: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
+ 1112: "JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 1113: "JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 1114: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 1115: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 1116: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
+ 1117: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
+ 1118: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 1119: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 1120: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
+ 1121: "WASM_INSTANCE_TYPE",
+ 1122: "WASM_MEMORY_TYPE",
+ 1123: "WASM_MODULE_TYPE",
+ 1124: "WASM_TABLE_TYPE",
+ 1125: "JS_BOUND_FUNCTION_TYPE",
+ 1126: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
@@ -167,10 +180,10 @@ KNOWN_MAPS = {
0x02201: (138, "FreeSpaceMap"),
0x02251: (132, "MetaMap"),
0x022a1: (131, "NullMap"),
- 0x022f1: (172, "DescriptorArrayMap"),
- 0x02341: (171, "FixedArrayMap"),
- 0x02391: (149, "OnePointerFillerMap"),
- 0x023e1: (149, "TwoPointerFillerMap"),
+ 0x022f1: (179, "DescriptorArrayMap"),
+ 0x02341: (178, "FixedArrayMap"),
+ 0x02391: (151, "OnePointerFillerMap"),
+ 0x023e1: (151, "TwoPointerFillerMap"),
0x02431: (131, "UninitializedMap"),
0x02481: (8, "OneByteInternalizedStringMap"),
0x024d1: (131, "UndefinedMap"),
@@ -178,108 +191,116 @@ KNOWN_MAPS = {
0x02571: (131, "TheHoleMap"),
0x025c1: (131, "BooleanMap"),
0x02611: (136, "ByteArrayMap"),
- 0x02661: (171, "FixedCOWArrayMap"),
- 0x026b1: (173, "HashTableMap"),
+ 0x02661: (178, "FixedCOWArrayMap"),
+ 0x026b1: (180, "HashTableMap"),
0x02701: (128, "SymbolMap"),
0x02751: (72, "OneByteStringMap"),
- 0x027a1: (171, "ScopeInfoMap"),
- 0x027f1: (181, "SharedFunctionInfoMap"),
+ 0x027a1: (181, "ScopeInfoMap"),
+ 0x027f1: (190, "SharedFunctionInfoMap"),
0x02841: (133, "CodeMap"),
- 0x02891: (171, "FunctionContextMap"),
- 0x028e1: (175, "CellMap"),
- 0x02931: (185, "WeakCellMap"),
- 0x02981: (180, "GlobalPropertyCellMap"),
+ 0x02891: (178, "FunctionContextMap"),
+ 0x028e1: (183, "CellMap"),
+ 0x02931: (194, "WeakCellMap"),
+ 0x02981: (189, "GlobalPropertyCellMap"),
0x029d1: (135, "ForeignMap"),
- 0x02a21: (174, "TransitionArrayMap"),
- 0x02a71: (177, "FeedbackVectorMap"),
+ 0x02a21: (182, "TransitionArrayMap"),
+ 0x02a71: (186, "FeedbackVectorMap"),
0x02ac1: (131, "ArgumentsMarkerMap"),
0x02b11: (131, "ExceptionMap"),
0x02b61: (131, "TerminationExceptionMap"),
0x02bb1: (131, "OptimizedOutMap"),
0x02c01: (131, "StaleRegisterMap"),
- 0x02c51: (171, "NativeContextMap"),
- 0x02ca1: (171, "ModuleContextMap"),
- 0x02cf1: (171, "EvalContextMap"),
- 0x02d41: (171, "ScriptContextMap"),
- 0x02d91: (171, "BlockContextMap"),
- 0x02de1: (171, "CatchContextMap"),
- 0x02e31: (171, "WithContextMap"),
- 0x02e81: (171, "DebugEvaluateContextMap"),
- 0x02ed1: (171, "ScriptContextTableMap"),
- 0x02f21: (171, "ArrayListMap"),
- 0x02f71: (148, "FixedDoubleArrayMap"),
+ 0x02c51: (178, "NativeContextMap"),
+ 0x02ca1: (178, "ModuleContextMap"),
+ 0x02cf1: (178, "EvalContextMap"),
+ 0x02d41: (178, "ScriptContextMap"),
+ 0x02d91: (178, "BlockContextMap"),
+ 0x02de1: (178, "CatchContextMap"),
+ 0x02e31: (178, "WithContextMap"),
+ 0x02e81: (178, "DebugEvaluateContextMap"),
+ 0x02ed1: (178, "ScriptContextTableMap"),
+ 0x02f21: (178, "ArrayListMap"),
+ 0x02f71: (150, "FixedDoubleArrayMap"),
0x02fc1: (134, "MutableHeapNumberMap"),
- 0x03011: (173, "OrderedHashMapMap"),
- 0x03061: (173, "OrderedHashSetMap"),
- 0x030b1: (173, "NameDictionaryMap"),
- 0x03101: (173, "GlobalDictionaryMap"),
- 0x03151: (173, "NumberDictionaryMap"),
- 0x031a1: (173, "StringTableMap"),
- 0x031f1: (173, "WeakHashTableMap"),
- 0x03241: (171, "SloppyArgumentsElementsMap"),
- 0x03291: (182, "SmallOrderedHashMapMap"),
- 0x032e1: (183, "SmallOrderedHashSetMap"),
- 0x03331: (176, "CodeDataContainerMap"),
- 0x03381: (1071, "JSMessageObjectMap"),
- 0x033d1: (1057, "ExternalMap"),
- 0x03421: (137, "BytecodeArrayMap"),
- 0x03471: (171, "ModuleInfoMap"),
- 0x034c1: (175, "NoClosuresCellMap"),
- 0x03511: (175, "OneClosureCellMap"),
- 0x03561: (175, "ManyClosuresCellMap"),
- 0x035b1: (179, "PropertyArrayMap"),
- 0x03601: (130, "BigIntMap"),
- 0x03651: (106, "NativeSourceStringMap"),
- 0x036a1: (64, "StringMap"),
- 0x036f1: (73, "ConsOneByteStringMap"),
- 0x03741: (65, "ConsStringMap"),
- 0x03791: (77, "ThinOneByteStringMap"),
- 0x037e1: (69, "ThinStringMap"),
- 0x03831: (67, "SlicedStringMap"),
- 0x03881: (75, "SlicedOneByteStringMap"),
- 0x038d1: (66, "ExternalStringMap"),
- 0x03921: (82, "ExternalStringWithOneByteDataMap"),
- 0x03971: (74, "ExternalOneByteStringMap"),
- 0x039c1: (98, "ShortExternalStringMap"),
- 0x03a11: (114, "ShortExternalStringWithOneByteDataMap"),
- 0x03a61: (0, "InternalizedStringMap"),
- 0x03ab1: (2, "ExternalInternalizedStringMap"),
- 0x03b01: (18, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x03b51: (10, "ExternalOneByteInternalizedStringMap"),
- 0x03ba1: (34, "ShortExternalInternalizedStringMap"),
- 0x03bf1: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x03c41: (42, "ShortExternalOneByteInternalizedStringMap"),
- 0x03c91: (106, "ShortExternalOneByteStringMap"),
- 0x03ce1: (140, "FixedUint8ArrayMap"),
- 0x03d31: (139, "FixedInt8ArrayMap"),
- 0x03d81: (142, "FixedUint16ArrayMap"),
- 0x03dd1: (141, "FixedInt16ArrayMap"),
- 0x03e21: (144, "FixedUint32ArrayMap"),
- 0x03e71: (143, "FixedInt32ArrayMap"),
- 0x03ec1: (145, "FixedFloat32ArrayMap"),
- 0x03f11: (146, "FixedFloat64ArrayMap"),
- 0x03f61: (147, "FixedUint8ClampedArrayMap"),
- 0x03fb1: (169, "Tuple2Map"),
- 0x04001: (167, "ScriptMap"),
- 0x04051: (160, "InterceptorInfoMap"),
- 0x040a1: (151, "AccessorInfoMap"),
- 0x040f1: (150, "AccessCheckInfoMap"),
- 0x04141: (152, "AccessorPairMap"),
- 0x04191: (153, "AliasedArgumentsEntryMap"),
- 0x041e1: (154, "AllocationMementoMap"),
- 0x04231: (155, "AllocationSiteMap"),
- 0x04281: (156, "AsyncGeneratorRequestMap"),
- 0x042d1: (157, "ContextExtensionMap"),
- 0x04321: (158, "DebugInfoMap"),
- 0x04371: (159, "FunctionTemplateInfoMap"),
- 0x043c1: (161, "ModuleInfoEntryMap"),
- 0x04411: (162, "ModuleMap"),
- 0x04461: (163, "ObjectTemplateInfoMap"),
- 0x044b1: (164, "PromiseReactionJobInfoMap"),
- 0x04501: (165, "PromiseResolveThenableJobInfoMap"),
- 0x04551: (166, "PrototypeInfoMap"),
- 0x045a1: (168, "StackFrameInfoMap"),
- 0x045f1: (170, "Tuple3Map"),
+ 0x03011: (180, "OrderedHashMapMap"),
+ 0x03061: (180, "OrderedHashSetMap"),
+ 0x030b1: (180, "NameDictionaryMap"),
+ 0x03101: (180, "GlobalDictionaryMap"),
+ 0x03151: (180, "NumberDictionaryMap"),
+ 0x031a1: (180, "SimpleNumberDictionaryMap"),
+ 0x031f1: (180, "StringTableMap"),
+ 0x03241: (180, "WeakHashTableMap"),
+ 0x03291: (178, "SloppyArgumentsElementsMap"),
+ 0x032e1: (191, "SmallOrderedHashMapMap"),
+ 0x03331: (192, "SmallOrderedHashSetMap"),
+ 0x03381: (184, "CodeDataContainerMap"),
+ 0x033d1: (1071, "JSMessageObjectMap"),
+ 0x03421: (1057, "ExternalMap"),
+ 0x03471: (137, "BytecodeArrayMap"),
+ 0x034c1: (178, "ModuleInfoMap"),
+ 0x03511: (185, "NoClosuresCellMap"),
+ 0x03561: (185, "OneClosureCellMap"),
+ 0x035b1: (185, "ManyClosuresCellMap"),
+ 0x03601: (188, "PropertyArrayMap"),
+ 0x03651: (130, "BigIntMap"),
+ 0x036a1: (106, "NativeSourceStringMap"),
+ 0x036f1: (64, "StringMap"),
+ 0x03741: (73, "ConsOneByteStringMap"),
+ 0x03791: (65, "ConsStringMap"),
+ 0x037e1: (77, "ThinOneByteStringMap"),
+ 0x03831: (69, "ThinStringMap"),
+ 0x03881: (67, "SlicedStringMap"),
+ 0x038d1: (75, "SlicedOneByteStringMap"),
+ 0x03921: (66, "ExternalStringMap"),
+ 0x03971: (82, "ExternalStringWithOneByteDataMap"),
+ 0x039c1: (74, "ExternalOneByteStringMap"),
+ 0x03a11: (98, "ShortExternalStringMap"),
+ 0x03a61: (114, "ShortExternalStringWithOneByteDataMap"),
+ 0x03ab1: (0, "InternalizedStringMap"),
+ 0x03b01: (2, "ExternalInternalizedStringMap"),
+ 0x03b51: (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x03ba1: (10, "ExternalOneByteInternalizedStringMap"),
+ 0x03bf1: (34, "ShortExternalInternalizedStringMap"),
+ 0x03c41: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x03c91: (42, "ShortExternalOneByteInternalizedStringMap"),
+ 0x03ce1: (106, "ShortExternalOneByteStringMap"),
+ 0x03d31: (140, "FixedUint8ArrayMap"),
+ 0x03d81: (139, "FixedInt8ArrayMap"),
+ 0x03dd1: (142, "FixedUint16ArrayMap"),
+ 0x03e21: (141, "FixedInt16ArrayMap"),
+ 0x03e71: (144, "FixedUint32ArrayMap"),
+ 0x03ec1: (143, "FixedInt32ArrayMap"),
+ 0x03f11: (145, "FixedFloat32ArrayMap"),
+ 0x03f61: (146, "FixedFloat64ArrayMap"),
+ 0x03fb1: (147, "FixedUint8ClampedArrayMap"),
+ 0x04001: (149, "FixedBigUint64ArrayMap"),
+ 0x04051: (148, "FixedBigInt64ArrayMap"),
+ 0x040a1: (171, "Tuple2Map"),
+ 0x040f1: (169, "ScriptMap"),
+ 0x04141: (162, "InterceptorInfoMap"),
+ 0x04191: (153, "AccessorInfoMap"),
+ 0x041e1: (152, "AccessCheckInfoMap"),
+ 0x04231: (154, "AccessorPairMap"),
+ 0x04281: (155, "AliasedArgumentsEntryMap"),
+ 0x042d1: (156, "AllocationMementoMap"),
+ 0x04321: (157, "AllocationSiteMap"),
+ 0x04371: (158, "AsyncGeneratorRequestMap"),
+ 0x043c1: (159, "ContextExtensionMap"),
+ 0x04411: (160, "DebugInfoMap"),
+ 0x04461: (161, "FunctionTemplateInfoMap"),
+ 0x044b1: (163, "ModuleInfoEntryMap"),
+ 0x04501: (164, "ModuleMap"),
+ 0x04551: (165, "ObjectTemplateInfoMap"),
+ 0x045a1: (166, "PromiseCapabilityMap"),
+ 0x045f1: (167, "PromiseReactionMap"),
+ 0x04641: (168, "PrototypeInfoMap"),
+ 0x04691: (170, "StackFrameInfoMap"),
+ 0x046e1: (172, "Tuple3Map"),
+ 0x04731: (173, "CallableTaskMap"),
+ 0x04781: (174, "CallbackTaskMap"),
+ 0x047d1: (175, "PromiseFulfillReactionJobTaskMap"),
+ 0x04821: (176, "PromiseRejectReactionJobTaskMap"),
+ 0x04871: (177, "PromiseResolveThenableJobTaskMap"),
}
# List of known V8 objects.
@@ -311,24 +332,24 @@ KNOWN_OBJECTS = {
("OLD_SPACE", 0x02721): "EmptyFixedFloat32Array",
("OLD_SPACE", 0x02741): "EmptyFixedFloat64Array",
("OLD_SPACE", 0x02761): "EmptyFixedUint8ClampedArray",
- ("OLD_SPACE", 0x02781): "EmptyScript",
- ("OLD_SPACE", 0x02809): "UndefinedCell",
- ("OLD_SPACE", 0x02819): "EmptySloppyArgumentsElements",
- ("OLD_SPACE", 0x02839): "EmptySlowElementDictionary",
- ("OLD_SPACE", 0x02881): "EmptyOrderedHashMap",
- ("OLD_SPACE", 0x028a9): "EmptyOrderedHashSet",
- ("OLD_SPACE", 0x028d1): "EmptyPropertyCell",
- ("OLD_SPACE", 0x028f9): "EmptyWeakCell",
- ("OLD_SPACE", 0x02969): "NoElementsProtector",
- ("OLD_SPACE", 0x02991): "IsConcatSpreadableProtector",
- ("OLD_SPACE", 0x029a1): "SpeciesProtector",
- ("OLD_SPACE", 0x029c9): "StringLengthProtector",
- ("OLD_SPACE", 0x029d9): "FastArrayIterationProtector",
- ("OLD_SPACE", 0x029e9): "ArrayIteratorProtector",
- ("OLD_SPACE", 0x02a11): "ArrayBufferNeuteringProtector",
- ("OLD_SPACE", 0x02a39): "InfinityValue",
- ("OLD_SPACE", 0x02a49): "MinusZeroValue",
- ("OLD_SPACE", 0x02a59): "MinusInfinityValue",
+ ("OLD_SPACE", 0x027c1): "EmptyScript",
+ ("OLD_SPACE", 0x02849): "ManyClosuresCell",
+ ("OLD_SPACE", 0x02859): "EmptySloppyArgumentsElements",
+ ("OLD_SPACE", 0x02879): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x028c1): "EmptyOrderedHashMap",
+ ("OLD_SPACE", 0x028e9): "EmptyOrderedHashSet",
+ ("OLD_SPACE", 0x02911): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x02939): "EmptyWeakCell",
+ ("OLD_SPACE", 0x029a9): "NoElementsProtector",
+ ("OLD_SPACE", 0x029d1): "IsConcatSpreadableProtector",
+ ("OLD_SPACE", 0x029e1): "SpeciesProtector",
+ ("OLD_SPACE", 0x02a09): "StringLengthProtector",
+ ("OLD_SPACE", 0x02a19): "FastArrayIterationProtector",
+ ("OLD_SPACE", 0x02a29): "ArrayIteratorProtector",
+ ("OLD_SPACE", 0x02a51): "ArrayBufferNeuteringProtector",
+ ("OLD_SPACE", 0x02ac9): "InfinityValue",
+ ("OLD_SPACE", 0x02ad9): "MinusZeroValue",
+ ("OLD_SPACE", 0x02ae9): "MinusInfinityValue",
}
# List of known V8 Frame Markers.
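
The renumbered tables above are regenerated output consumed by debugging tools such as grokdump.py. A minimal sketch of the lookup they enable; describe_map is a hypothetical helper, and the values are taken from the updated tables:

import v8heapconst

def describe_map(offset):
    # KNOWN_MAPS maps a heap offset to (instance type, map name).
    instance_type, map_name = v8heapconst.KNOWN_MAPS[offset]
    return '%s (type %d: %s)' % (
        map_name, instance_type, v8heapconst.INSTANCE_TYPES[instance_type])

print(describe_map(0x02341))  # FixedArrayMap (type 178: FIXED_ARRAY_TYPE)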
diff --git a/deps/v8/tools/verify_source_deps.py b/deps/v8/tools/verify_source_deps.py
deleted file mode 100755
index c49d51ab5d..0000000000
--- a/deps/v8/tools/verify_source_deps.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Script to print potentially missing source dependencies based on the actual
-.h and .cc files in the source tree and which files are included in the gyp
-and gn files. The latter inclusion is overapproximated.
-
-TODO(machenbach): If two source files with the same name exist, but only one
-is referenced from a gyp/gn file, we won't necessarily detect it.
-"""
-
-import itertools
-import re
-import os
-import subprocess
-import sys
-
-
-V8_BASE = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-
-GYP_FILES = [
- os.path.join(V8_BASE, 'src', 'd8.gyp'),
- os.path.join(V8_BASE, 'src', 'v8.gyp'),
- os.path.join(V8_BASE, 'src', 'inspector', 'inspector.gypi'),
- os.path.join(V8_BASE, 'src', 'third_party', 'vtune', 'v8vtune.gyp'),
- os.path.join(V8_BASE, 'samples', 'samples.gyp'),
- os.path.join(V8_BASE, 'test', 'cctest', 'cctest.gyp'),
- os.path.join(V8_BASE, 'test', 'fuzzer', 'fuzzer.gyp'),
- os.path.join(V8_BASE, 'test', 'unittests', 'unittests.gyp'),
- os.path.join(V8_BASE, 'test', 'inspector', 'inspector.gyp'),
- os.path.join(V8_BASE, 'test', 'mkgrokdump', 'mkgrokdump.gyp'),
- os.path.join(V8_BASE, 'testing', 'gmock.gyp'),
- os.path.join(V8_BASE, 'testing', 'gtest.gyp'),
- os.path.join(V8_BASE, 'tools', 'parser-shell.gyp'),
-]
-
-ALL_GYP_PREFIXES = [
- '..',
- 'common',
- os.path.join('src', 'third_party', 'vtune'),
- 'src',
- 'samples',
- 'testing',
- 'tools',
- os.path.join('test', 'cctest'),
- os.path.join('test', 'common'),
- os.path.join('test', 'fuzzer'),
- os.path.join('test', 'unittests'),
- os.path.join('test', 'inspector'),
- os.path.join('test', 'mkgrokdump'),
-]
-
-GYP_UNSUPPORTED_FEATURES = [
- 'gcmole',
- 'setup-isolate-deserialize.cc',
- 'v8-version.h'
-]
-
-GN_FILES = [
- os.path.join(V8_BASE, 'BUILD.gn'),
- os.path.join(V8_BASE, 'build', 'secondary', 'testing', 'gmock', 'BUILD.gn'),
- os.path.join(V8_BASE, 'build', 'secondary', 'testing', 'gtest', 'BUILD.gn'),
- os.path.join(V8_BASE, 'src', 'inspector', 'BUILD.gn'),
- os.path.join(V8_BASE, 'test', 'cctest', 'BUILD.gn'),
- os.path.join(V8_BASE, 'test', 'unittests', 'BUILD.gn'),
- os.path.join(V8_BASE, 'test', 'inspector', 'BUILD.gn'),
- os.path.join(V8_BASE, 'test', 'mkgrokdump', 'BUILD.gn'),
- os.path.join(V8_BASE, 'tools', 'BUILD.gn'),
-]
-
-GN_UNSUPPORTED_FEATURES = [
- 'aix',
- 'cygwin',
- 'freebsd',
- 'gcmole',
- 'openbsd',
- 'ppc',
- 'qnx',
- 'solaris',
- 'vtune',
- 'v8-version.h',
-]
-
-ALL_GN_PREFIXES = [
- '..',
- os.path.join('src', 'inspector'),
- 'src',
- 'testing',
- os.path.join('test', 'cctest'),
- os.path.join('test', 'unittests'),
- os.path.join('test', 'inspector'),
- os.path.join('test', 'mkgrokdump'),
-]
-
-def pathsplit(path):
- return re.split('[/\\\\]', path)
-
-def path_no_prefix(path, prefixes):
- for prefix in prefixes:
- if path.startswith(prefix + os.sep):
- return path_no_prefix(path[len(prefix) + 1:], prefixes)
- return path
-
-
-def isources(prefixes):
- cmd = ['git', 'ls-tree', '-r', 'HEAD', '--full-name', '--name-only']
- for f in subprocess.check_output(cmd, universal_newlines=True).split('\n'):
- if not (f.endswith('.h') or f.endswith('.cc')):
- continue
- yield path_no_prefix(os.path.join(*pathsplit(f)), prefixes)
-
-
-def iflatten(obj):
- if isinstance(obj, dict):
- for value in obj.values():
- for i in iflatten(value):
- yield i
- elif isinstance(obj, list):
- for value in obj:
- for i in iflatten(value):
- yield i
- elif isinstance(obj, basestring):
- yield path_no_prefix(os.path.join(*pathsplit(obj)), ALL_GYP_PREFIXES)
-
-
-def iflatten_gyp_file(gyp_file):
- """Overaproximates all values in the gyp file.
-
- Iterates over all string values recursively. Removes '../' path prefixes.
- """
- with open(gyp_file) as f:
- return iflatten(eval(f.read()))
-
-
-def iflatten_gn_file(gn_file):
- """Overaproximates all values in the gn file.
-
- Iterates over all double quoted strings.
- """
- with open(gn_file) as f:
- for line in f.read().splitlines():
- match = re.match(r'.*"([^"]*)".*', line)
- if match:
- yield path_no_prefix(
- os.path.join(*pathsplit(match.group(1))), ALL_GN_PREFIXES)
-
-
-def icheck_values(values, prefixes):
- for source_file in isources(prefixes):
- if source_file not in values:
- yield source_file
-
-
-def missing_gyp_files():
- gyp_values = set(itertools.chain(
- *[iflatten_gyp_file(gyp_file) for gyp_file in GYP_FILES]
- ))
- gyp_files = sorted(icheck_values(gyp_values, ALL_GYP_PREFIXES))
- return filter(
- lambda x: not any(i in x for i in GYP_UNSUPPORTED_FEATURES), gyp_files)
-
-
-def missing_gn_files():
- gn_values = set(itertools.chain(
- *[iflatten_gn_file(gn_file) for gn_file in GN_FILES]
- ))
-
- gn_files = sorted(icheck_values(gn_values, ALL_GN_PREFIXES))
- return filter(
- lambda x: not any(i in x for i in GN_UNSUPPORTED_FEATURES), gn_files)
-
-
-def main():
- print "----------- Files not in gyp: ------------"
- for i in missing_gyp_files():
- print i
-
- print "\n----------- Files not in gn: -------------"
- for i in missing_gn_files():
- print i
- return 0
-
-if '__main__' == __name__:
- sys.exit(main())
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index c4d18a3333..92aaa8fd3c 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -28,6 +28,9 @@ mkdir ${SPEC_TEST_DIR}/tmp
./tools/dev/gm.py x64.release d8
cd ${V8_DIR}/test/wasm-js/interpreter
+
+# The next step requires that OCaml is installed. See the README.md in
+# ${V8_DIR}/test/wasm-js/interpreter/.
make clean all
cd ${V8_DIR}/test/wasm-js/test/core
@@ -42,4 +45,11 @@ echo
echo "The following files will get uploaded:"
ls tests
echo
+
+# The following command requires prior authentication with Google Cloud
+# Storage. To set this up, execute
+#
+# > gsutil.py config
+#
+# When the script asks for your project-id, use 0.
upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 83f006688c..2367b2ccc8 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,4 +7,6 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly...
-The bartender starts to shake the bottles.......................
+The bartender starts to shake the bottles........
+.
+.