path: root/deps/v8/tools
author     Michaël Zasso <targos@protonmail.com>  2019-08-01 08:38:30 +0200
committer  Michaël Zasso <targos@protonmail.com>  2019-08-01 12:53:56 +0200
commit     2dcc3665abf57c3607cebffdeeca062f5894885d (patch)
tree       4f560748132edcfb4c22d6f967a7e80d23d7ea2c /deps/v8/tools
parent     1ee47d550c6de132f06110aa13eceb7551d643b3 (diff)
download   android-node-v8-2dcc3665abf57c3607cebffdeeca062f5894885d.tar.gz
           android-node-v8-2dcc3665abf57c3607cebffdeeca062f5894885d.tar.bz2
           android-node-v8-2dcc3665abf57c3607cebffdeeca062f5894885d.zip
deps: update V8 to 7.6.303.28
PR-URL: https://github.com/nodejs/node/pull/28016
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Refael Ackermann (רפאל פלחי) <refack@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Diffstat (limited to 'deps/v8/tools')
-rw-r--r--  deps/v8/tools/BUILD.gn | 1
-rw-r--r--  deps/v8/tools/OWNERS | 4
-rwxr-xr-x  deps/v8/tools/bash-completion.sh | 6
-rw-r--r--  deps/v8/tools/cfi/blacklist.txt | 2
-rwxr-xr-x  deps/v8/tools/check-static-initializers.sh | 4
-rw-r--r--  deps/v8/tools/clusterfuzz/OWNERS | 4
-rw-r--r--  deps/v8/tools/clusterfuzz/testdata/failure_output.txt | 4
-rw-r--r--  deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt | 7
-rwxr-xr-x  deps/v8/tools/clusterfuzz/v8_foozzie.py | 13
-rwxr-xr-x  deps/v8/tools/clusterfuzz/v8_foozzie_test.py | 2
-rw-r--r--  deps/v8/tools/clusterfuzz/v8_fuzz_config.py | 7
-rw-r--r--  deps/v8/tools/clusterfuzz/v8_sanity_checks.js | 3
-rw-r--r--  deps/v8/tools/codemap.js | 14
-rwxr-xr-x  deps/v8/tools/dev/gm.py | 10
-rw-r--r--  deps/v8/tools/dumpcpp-driver.js | 3
-rw-r--r--  deps/v8/tools/gcmole/BUILD.gn | 2
-rw-r--r--  deps/v8/tools/gcmole/README | 2
-rw-r--r--  deps/v8/tools/gcmole/gcmole-test.cc | 6
-rwxr-xr-x [-rw-r--r--]  deps/v8/tools/gcmole/package.sh | 0
-rwxr-xr-x  deps/v8/tools/gcmole/run-gcmole.py | 2
-rw-r--r--  deps/v8/tools/gdbinit | 85
-rw-r--r--  deps/v8/tools/gen-postmortem-metadata.py | 23
-rwxr-xr-x  deps/v8/tools/generate-header-include-checks.py | 4
-rwxr-xr-x  deps/v8/tools/js2c.py | 13
-rwxr-xr-x  deps/v8/tools/mb/mb.py | 54
-rwxr-xr-x  deps/v8/tools/mb/mb_unittest.py | 26
-rwxr-xr-x  deps/v8/tools/node/build_gn.py | 143
-rwxr-xr-x  deps/v8/tools/node/test_update_node.py | 125
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/.gitignore | 7
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me | 1
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo | 1
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/delete_me | 1
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h | 20
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/v8_foo | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/.gitignore | 3
-rw-r--r--  deps/v8/tools/node/testdata/v8/base/trace_event/common/common | 0
-rw-r--r--  deps/v8/tools/node/testdata/v8/baz/v8_foo | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/baz/v8_new | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/new/v8_new | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/testing/gtest/gtest_bar | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/testing/gtest/gtest_new | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/v8_foo | 1
-rw-r--r--  deps/v8/tools/node/testdata/v8/v8_new | 1
-rwxr-xr-x  deps/v8/tools/node/update_node.py | 180
-rw-r--r--  deps/v8/tools/profviz/worker.js | 2
-rwxr-xr-x [-rw-r--r--]  deps/v8/tools/run-wasm-api-tests.py | 16
-rw-r--r-- [-rwxr-xr-x]  deps/v8/tools/run_perf.py | 937
-rw-r--r--  deps/v8/tools/shell-utils.h | 2
-rw-r--r--  deps/v8/tools/testrunner/OWNERS | 4
-rw-r--r--  deps/v8/tools/testrunner/base_runner.py | 27
-rw-r--r--  deps/v8/tools/testrunner/local/command.py | 9
-rw-r--r--  deps/v8/tools/testrunner/local/junit_output.py | 49
-rw-r--r--  deps/v8/tools/testrunner/local/variants.py | 4
-rw-r--r--  deps/v8/tools/testrunner/objects/output.py | 16
-rw-r--r--  deps/v8/tools/testrunner/objects/testcase.py | 9
-rwxr-xr-x  deps/v8/tools/testrunner/standard_runner.py | 5
-rw-r--r--  deps/v8/tools/testrunner/testproc/progress.py | 63
-rw-r--r--  deps/v8/tools/tick-processor.html | 3
-rw-r--r--  deps/v8/tools/tickprocessor-driver.js | 3
-rw-r--r--  deps/v8/tools/tickprocessor.js | 15
-rwxr-xr-x  deps/v8/tools/torque/format-torque.py | 21
-rwxr-xr-x  deps/v8/tools/torque/make-torque-parser.py | 71
-rw-r--r--  deps/v8/tools/torque/vim-torque/syntax/torque.vim | 2
-rw-r--r--  deps/v8/tools/torque/vscode-torque/package.json | 8
-rw-r--r--  deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json | 350
-rwxr-xr-x  deps/v8/tools/unittests/run_perf_test.py | 277
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json | 1
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json | 1
-rwxr-xr-x  deps/v8/tools/v8_presubmit.py | 8
-rw-r--r--  deps/v8/tools/v8heapconst.py | 462
-rw-r--r--  deps/v8/tools/vim/ninja-build.vim | 5
-rw-r--r--  deps/v8/tools/wasm-compilation-hints/OWNERS | 2
-rwxr-xr-x  deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py | 60
-rwxr-xr-x  deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py | 47
-rw-r--r--  deps/v8/tools/wasm-compilation-hints/wasm.py | 108
-rw-r--r--  deps/v8/tools/whitespace.txt | 2
79 files changed, 1560 insertions, 1822 deletions
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 7b019ad0b2..e6fd743715 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -43,6 +43,7 @@ group("v8_testrunner") {
testonly = true
data_deps = [
+ "..:v8_python_base",
"..:v8_dump_build_config",
":v8_android_test_runner_deps",
]
diff --git a/deps/v8/tools/OWNERS b/deps/v8/tools/OWNERS
index 85f514c4ab..bdb1d555a4 100644
--- a/deps/v8/tools/OWNERS
+++ b/deps/v8/tools/OWNERS
@@ -1,3 +1 @@
-machenbach@chromium.org
-sergiyb@chromium.org
-tmrts@chromium.org \ No newline at end of file
+file://INFRA_OWNERS
diff --git a/deps/v8/tools/bash-completion.sh b/deps/v8/tools/bash-completion.sh
index 5b9f7f5073..27e73b7ad6 100755
--- a/deps/v8/tools/bash-completion.sh
+++ b/deps/v8/tools/bash-completion.sh
@@ -37,11 +37,11 @@ v8_source=$(readlink -f $(dirname $BASH_SOURCE)/..)
_v8_flag() {
local cur defines targets
cur="${COMP_WORDS[COMP_CWORD]}"
- defines=$(cat $v8_source/src/flag-definitions.h \
+ defines=$(cat $v8_source/src/flags/flag-definitions.h \
| grep "^DEFINE" \
| grep -v "DEFINE_IMPLICATION" \
| sed -e 's/_/-/g'; \
- cat $v8_source/src/flag-definitions.h \
+ cat $v8_source/src/flags/flag-definitions.h \
| grep "^ V(harmony_" \
| sed -e 's/^ V/DEFINE-BOOL/' \
| sed -e 's/_/-/g')
@@ -49,7 +49,7 @@ _v8_flag() {
| sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
echo "$defines" \
| sed -ne 's/^DEFINE-BOOL(\([^,]*\).*/--no\1/p'; \
- cat $v8_source/src/d8.cc \
+ cat $v8_source/src/d8/d8.cc \
| grep "strcmp(argv\[i\]" \
| sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
COMPREPLY=($(compgen -W "$targets" -- "$cur"))
diff --git a/deps/v8/tools/cfi/blacklist.txt b/deps/v8/tools/cfi/blacklist.txt
index c1571b8c65..9886fd37fb 100644
--- a/deps/v8/tools/cfi/blacklist.txt
+++ b/deps/v8/tools/cfi/blacklist.txt
@@ -15,7 +15,7 @@ type:std::*
fun:*LocaleConvertCase*
# PropertyCallbackArguments::Call methods cast function pointers
-src:*src/api-arguments-inl.h
+src:*src/api/api-arguments-inl.h
# v8 callback that casts argument template parameters
fun:*PendingPhantomCallback*Invoke*
diff --git a/deps/v8/tools/check-static-initializers.sh b/deps/v8/tools/check-static-initializers.sh
index da43170f6e..fdd1e8417d 100755
--- a/deps/v8/tools/check-static-initializers.sh
+++ b/deps/v8/tools/check-static-initializers.sh
@@ -30,8 +30,8 @@
# initializer in d8 matches the one defined below.
# Allow:
-# - _GLOBAL__I__ZN2v810LineEditor6first_E
-# - _GLOBAL__I__ZN2v88internal32AtomicOps_Internalx86CPUFeaturesE
+# _GLOBAL__sub_I_d8.cc
+# _GLOBAL__sub_I_iostream.cpp
expected_static_init_count=2
v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
diff --git a/deps/v8/tools/clusterfuzz/OWNERS b/deps/v8/tools/clusterfuzz/OWNERS
index c8693c972c..50b5741785 100644
--- a/deps/v8/tools/clusterfuzz/OWNERS
+++ b/deps/v8/tools/clusterfuzz/OWNERS
@@ -1,5 +1,3 @@
set noparent
-machenbach@chromium.org
-sergiyb@chromium.org
-tmrts@chromium.org \ No newline at end of file
+file://INFRA_OWNERS
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
index 27440742e8..dae84cbbb1 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---abort-on-stack-or-string-length-overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---abort-on-stack-or-string-length-overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
index 72bf95d0b1..fa3d672f00 100644
--- a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
@@ -2,16 +2,16 @@
# V8 correctness failure
# V8 correctness configs: x64,ignition:x64,ignition_turbo
# V8 correctness sources: sanity check failed
-# V8 correctness suppression:
+# V8 correctness suppression:
#
# CHECK
#
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---abort-on-stack-or-string-length-overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---abort-on-stack-or-string-length-overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
#
# Difference:
- unknown
@@ -44,3 +44,4 @@ not unknown
### End of configuration x64,ignition_turbo
+
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 26b189e27f..159fea9496 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -43,7 +43,17 @@ CONFIGS = dict(
'--no-lazy',
'--no-lazy-inner-functions',
],
+ ignition_no_ic=[
+ '--turbo-filter=~',
+ '--noopt',
+ '--liftoff',
+ '--no-wasm-tier-up',
+ '--no-use-ic',
+ ],
ignition_turbo=[],
+ ignition_turbo_no_ic=[
+ '--no-use-ic',
+ ],
ignition_turbo_opt=[
'--always-opt',
'--no-liftoff',
@@ -86,6 +96,7 @@ ADDITIONAL_FLAGS = [
(0.01, '--thread-pool-size=2'),
(0.01, '--thread-pool-size=4'),
(0.01, '--thread-pool-size=8'),
+ (0.1, '--interrupt-budget=1000'),
]
# Timeout in seconds for one d8 run.
@@ -103,7 +114,7 @@ PREAMBLE = [
ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')
SANITY_CHECKS = os.path.join(BASE_PATH, 'v8_sanity_checks.js')
-FLAGS = ['--abort-on-stack-or-string-length-overflow', '--expose-gc',
+FLAGS = ['--correctness-fuzzer-suppressions', '--expose-gc',
'--allow-natives-syntax', '--invoke-weak-callbacks', '--omit-quit',
'--es-staging', '--no-wasm-async-compilation',
'--suppress-asm-messages']
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
index e9559f6e0c..b13d3d7677 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -39,7 +39,7 @@ class ConfigTest(unittest.TestCase):
self.assertEqual(
[
'--first-config=ignition',
- '--second-config=ignition_turbo',
+ '--second-config=ignition_turbo_no_ic',
'--second-d8=d8',
],
v8_fuzz_config.Config('foo', Rng()).choose_foozzie_flags(),
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
index 39e983f74a..1cd353225b 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -11,8 +11,11 @@ FOOZZIE_EXPERIMENTS = [
[10, 'ignition', 'jitless', 'd8'],
[10, 'ignition', 'slow_path', 'd8'],
[5, 'ignition', 'slow_path_opt', 'd8'],
- [30, 'ignition', 'ignition_turbo', 'd8'],
- [20, 'ignition', 'ignition_turbo_opt', 'd8'],
+ [10, 'ignition', 'ignition_turbo', 'd8'],
+ [10, 'ignition_no_ic', 'ignition_turbo', 'd8'],
+ [10, 'ignition', 'ignition_turbo_no_ic', 'd8'],
+ [10, 'ignition', 'ignition_turbo_opt', 'd8'],
+ [10, 'ignition_no_ic', 'ignition_turbo_opt', 'd8'],
[5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
[5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
[5, 'ignition', 'ignition', 'clang_x86/d8'],
diff --git a/deps/v8/tools/clusterfuzz/v8_sanity_checks.js b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
index 2b7cb65a1b..1b682432ce 100644
--- a/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
+++ b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
@@ -17,6 +17,5 @@ print("https://crbug.com/935800");
function baz() {}
return {bar: baz};
}
- // TODO(mstarzinger): Uncomment once https://crbug.com/935800 is resolved.
- // print(Object.getOwnPropertyNames(foo().bar));
+ print(Object.getOwnPropertyNames(foo().bar));
})();
diff --git a/deps/v8/tools/codemap.js b/deps/v8/tools/codemap.js
index 4c185b0464..df6770f9a8 100644
--- a/deps/v8/tools/codemap.js
+++ b/deps/v8/tools/codemap.js
@@ -178,15 +178,6 @@ CodeMap.prototype.findInTree_ = function(tree, addr) {
return node && this.isAddressBelongsTo_(addr, node) ? node : null;
};
-/**
- * Embedded builtins are located in the shared library but should be attributed
- * according to the dynamically generated code-create events.
- *
- * @private
- */
-CodeMap.prototype.isIsolateIndependentBuiltin_ = function(entry) {
- return entry.type == "CPP" && /v8_\w*embedded_blob_/.test(entry.name);
-};
/**
* Finds a code entry that contains the specified address. Both static and
@@ -205,10 +196,7 @@ CodeMap.prototype.findAddress = function(addr) {
result = this.findInTree_(this.libraries_, addr);
if (!result) return null;
}
- if (!this.isIsolateIndependentBuiltin_(result.value)) {
- // Embedded builtins are handled in the following dynamic section.
- return { entry : result.value, offset : addr - result.key };
- }
+ return { entry : result.value, offset : addr - result.key };
}
var min = this.dynamics_.findMin();
var max = this.dynamics_.findMax();
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index bc808c31ae..0e01f4f8d7 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -43,8 +43,8 @@ MODES = ["release", "debug", "optdebug"]
# Modes that get built/run when you don't specify any.
DEFAULT_MODES = ["release", "debug"]
# Build targets that can be manually specified.
-TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "mkgrokdump",
- "generate-bytecode-expectations", "inspector-test"]
+TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "wasm_api_tests", "wee8",
+ "mkgrokdump", "generate-bytecode-expectations", "inspector-test"]
# Build targets that get built when you don't specify any (and specified tests
# don't imply any other targets).
DEFAULT_TARGETS = ["d8"]
@@ -64,13 +64,14 @@ ACTIONS = {
HELP = """<arch> can be any of: %(arches)s
<mode> can be any of: %(modes)s
<target> can be any of:
- - cctest, d8, unittests, v8_fuzzers (build respective binary)
+ - %(targets)s (build respective binary)
- all (build all binaries)
- tests (build test binaries)
- check (build test binaries, run most tests)
- checkall (build all binaries, run more tests)
""" % {"arches": " ".join(ARCHES),
- "modes": " ".join(MODES)}
+ "modes": " ".join(MODES),
+ "targets": ", ".join(TARGETS)}
TESTSUITES_TARGETS = {"benchmarks": "d8",
"cctest": "cctest",
@@ -84,6 +85,7 @@ TESTSUITES_TARGETS = {"benchmarks": "d8",
"preparser": "d8",
"test262": "d8",
"unittests": "unittests",
+ "wasm-api-tests": "wasm_api_tests",
"webkit": "d8"}
OUTDIR = "out"
diff --git a/deps/v8/tools/dumpcpp-driver.js b/deps/v8/tools/dumpcpp-driver.js
index 44527771e4..6073dea738 100644
--- a/deps/v8/tools/dumpcpp-driver.js
+++ b/deps/v8/tools/dumpcpp-driver.js
@@ -39,7 +39,8 @@ if (params.sourceMap) {
}
var cppProcessor = new CppProcessor(
- new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
+ new (entriesProviders[params.platform])(params.nm, params.targetRootFS,
+ params.apkEmbeddedLibrary),
params.timedRange, params.pairwiseTimedRange);
cppProcessor.processLogFile(params.logFileName);
cppProcessor.dumpCppSymbols();
diff --git a/deps/v8/tools/gcmole/BUILD.gn b/deps/v8/tools/gcmole/BUILD.gn
index f10667e6c2..2ef4472207 100644
--- a/deps/v8/tools/gcmole/BUILD.gn
+++ b/deps/v8/tools/gcmole/BUILD.gn
@@ -24,6 +24,8 @@ group("v8_run_gcmole") {
"../../testing/gtest/include/gtest/gtest_prod.h",
"../../third_party/googletest/src/googletest/include/gtest/gtest_prod.h",
"../../third_party/icu/source/",
+ "../../third_party/wasm-api/wasm.h",
+ "../../third_party/wasm-api/wasm.hh",
"$target_gen_dir/../../",
"$target_gen_dir/../../torque-generated/",
]
diff --git a/deps/v8/tools/gcmole/README b/deps/v8/tools/gcmole/README
index 7e25da3aa1..578ea56219 100644
--- a/deps/v8/tools/gcmole/README
+++ b/deps/v8/tools/gcmole/README
@@ -27,7 +27,7 @@ PREREQUISITES -----------------------------------------------------------------
Follow the instructions on http://clang.llvm.org/get_started.html.
- Make sure to pass -DCMAKE_BUILD_TYPE=Release to cmake to get Release build
+ Make sure to pass -DCMAKE_BUILD_TYPE=Release to cmake to get Release build
instead of a Debug one.
(3) Build gcmole Clang plugin (libgcmole.so)
diff --git a/deps/v8/tools/gcmole/gcmole-test.cc b/deps/v8/tools/gcmole/gcmole-test.cc
index b0a341bb55..c00c6e5539 100644
--- a/deps/v8/tools/gcmole/gcmole-test.cc
+++ b/deps/v8/tools/gcmole/gcmole-test.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/handles-inl.h"
-#include "src/handles.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/handles.h"
#include "src/objects/maybe-object.h"
#include "src/objects/object-macros.h"
diff --git a/deps/v8/tools/gcmole/package.sh b/deps/v8/tools/gcmole/package.sh
index 6206e7bb2e..6206e7bb2e 100644..100755
--- a/deps/v8/tools/gcmole/package.sh
+++ b/deps/v8/tools/gcmole/package.sh
diff --git a/deps/v8/tools/gcmole/run-gcmole.py b/deps/v8/tools/gcmole/run-gcmole.py
index 76a6b55d44..6f2a091c3c 100755
--- a/deps/v8/tools/gcmole/run-gcmole.py
+++ b/deps/v8/tools/gcmole/run-gcmole.py
@@ -21,7 +21,7 @@ BASE_PATH = os.path.dirname(os.path.dirname(GCMOLE_PATH))
assert len(sys.argv) == 2
-if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-from-dsl.h"):
+if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-tq.h"):
print("Expected generated headers in out/Release/gen.")
print("Either build v8 in out/Release or change gcmole.lua:115")
sys.exit(-1)
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index 6c3778fca7..a91554c3fa 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -163,3 +163,88 @@ def dcheck_stop_handler(event):
gdb.events.stop.connect(dcheck_stop_handler)
end
+
+# Code imported from chromium/src/tools/gdb/gdbinit
+python
+
+import os
+import subprocess
+import sys
+
+compile_dirs = set()
+
+
+def get_current_debug_file_directories():
+ dir = gdb.execute("show debug-file-directory", to_string=True)
+ dir = dir[
+ len('The directory where separate debug symbols are searched for is "'
+ ):-len('".') - 1]
+ return set(dir.split(":"))
+
+
+def add_debug_file_directory(dir):
+ # gdb has no function to add debug-file-directory, simulates that by using
+ # `show debug-file-directory` and `set debug-file-directory <directories>`.
+ current_dirs = get_current_debug_file_directories()
+ current_dirs.add(dir)
+ gdb.execute(
+ "set debug-file-directory %s" % ":".join(current_dirs), to_string=True)
+
+
+def load_libcxx_pretty_printers(src_dir):
+ libcxx_pretty_printers = os.path.join(src_dir, 'third_party',
+ 'libcxx-pretty-printers')
+ if not os.path.isdir(libcxx_pretty_printers):
+ return
+ sys.path.insert(1, libcxx_pretty_printers)
+ from printers import register_libcxx_printers
+ register_libcxx_printers(None)
+
+
+def load_gdb_chrome(src_dir):
+ tools_gdb = os.path.join(src_dir, 'tools', 'gdb')
+
+ sys.path.insert(1, tools_gdb)
+ import gdb_chrome
+
+ gdb.execute('source %s' % os.path.join(tools_gdb, 'viewg.gdb'))
+
+
+def newobj_handler(event):
+ global compile_dirs
+ compile_dir = os.path.dirname(event.new_objfile.filename)
+ if not compile_dir:
+ return
+ if compile_dir in compile_dirs:
+ return
+ compile_dirs.add(compile_dir)
+
+ # Add source path
+ gdb.execute("dir %s" % compile_dir)
+
+ # Need to tell the location of .dwo files.
+ # https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html
+ # https://crbug.com/603286#c35
+ add_debug_file_directory(compile_dir)
+
+ git = subprocess.Popen(
+ ['git', '-C', compile_dir, 'rev-parse', '--show-toplevel'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ src_dir, _ = git.communicate()
+ if git.returncode:
+ return
+ src_dir = str(src_dir).rstrip()
+
+ load_libcxx_pretty_printers(src_dir)
+
+ load_gdb_chrome(src_dir)
+
+
+# Event hook for newly loaded objfiles.
+# https://sourceware.org/gdb/onlinedocs/gdb/Events-In-Python.html
+gdb.events.new_objfile.connect(newobj_handler)
+
+gdb.execute("set environment CHROMIUM_GDBINIT_SOURCED=1")
+
+end
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 48265b2418..1c10eb4443 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -85,10 +85,6 @@ consts_misc = [
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
{ 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
- { 'name': 'SystemPointerSize', 'value': 'kSystemPointerSize' },
- { 'name': 'SystemPointerSizeLog2', 'value': 'kSystemPointerSizeLog2' },
- { 'name': 'TaggedSize', 'value': 'kTaggedSize' },
- { 'name': 'TaggedSizeLog2', 'value': 'kTaggedSizeLog2' },
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
@@ -165,8 +161,6 @@ consts_misc = [
'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
{ 'name': 'bit_field3_number_of_own_descriptors_shift',
'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
- { 'name': 'class_Map__instance_descriptors_offset',
- 'value': 'Map::kDescriptorsOffset' },
{ 'name': 'off_fp_context_or_frame_type',
'value': 'CommonFrameConstants::kContextOrFrameTypeOffset'},
@@ -246,7 +240,6 @@ extras_accessors = [
'JSObject, elements, Object, kElementsOffset',
'JSObject, internal_fields, uintptr_t, kHeaderSize',
'FixedArray, data, uintptr_t, kHeaderSize',
- 'FixedTypedArrayBase, external_pointer, uintptr_t, kExternalPointerOffset',
'JSArrayBuffer, backing_store, uintptr_t, kBackingStoreOffset',
'JSArrayBuffer, byte_length, size_t, kByteLengthOffset',
'JSArrayBufferView, byte_length, size_t, kByteLengthOffset',
@@ -304,11 +297,11 @@ header = '''
* This file is generated by %s. Do not edit directly.
*/
-#include "src/v8.h"
-#include "src/frames.h"
-#include "src/frames-inl.h" /* for architecture-specific frame constants */
-#include "src/contexts.h"
-#include "src/objects.h"
+#include "src/init/v8.h"
+#include "src/execution/frames.h"
+#include "src/execution/frames-inl.h" /* for architecture-specific frame constants */
+#include "src/objects/contexts.h"
+#include "src/objects/objects.h"
#include "src/objects/data-handler.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-string-iterator.h"
@@ -408,7 +401,11 @@ def load_objects_from_file(objfilename, checktypes):
klass = match.group(1).strip();
pklass = match.group(2);
if (pklass):
- pklass = pklass.strip();
+ # Strip potential template arguments from parent
+ # class.
+ match = re.match(r'(\w+)(<.*>)?', pklass.strip());
+ pklass = match.group(1).strip();
+
klasses[klass] = { 'parent': pklass };
#
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index e5ee98794d..fa18d85bf5 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -30,10 +30,12 @@ V8_DIR = os.path.dirname(MY_DIR)
OUT_DIR = os.path.join(V8_DIR, 'check-header-includes')
AUTO_EXCLUDE = [
# flag-definitions.h needs a mode set for being included.
- 'src/flag-definitions.h',
+ 'src/flags/flag-definitions.h',
]
AUTO_EXCLUDE_PATTERNS = [
'src/base/atomicops_internals_.*',
+ # TODO(petermarshall): Enable once Perfetto is built by default.
+ 'src/libplatform/tracing/perfetto*',
] + [
# platform-specific headers
'\\b{}\\b'.format(p) for p in
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index 561e4547e1..b94f3add23 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -105,9 +105,9 @@ HEADER_TEMPLATE = """\
// want to make changes to this file you should either change the
// javascript source files or the GYP script.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/snapshot/natives.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -245,7 +245,10 @@ def BuildMetadata(sources, source_bytes, native_type):
raw_sources = "".join(sources.modules)
# The sources are expected to be ASCII-only.
- assert not filter(lambda value: ord(value) >= 128, raw_sources)
+ try:
+ raw_sources.encode('ascii')
+ except UnicodeEncodeError:
+ assert False
# Loop over modules and build up indices into the source blob:
get_index_cases = []
@@ -300,8 +303,8 @@ def PutInt(blob_file, value):
def PutStr(blob_file, value):
- PutInt(blob_file, len(value));
- blob_file.write(value);
+ PutInt(blob_file, len(value.encode()))
+ blob_file.write(value.encode())
def WriteStartupBlob(sources, startup_blob):
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index 1466079e26..b79a380796 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -131,6 +131,8 @@ class MetaBuildWrapper(object):
subp.add_argument('output_path', nargs=1,
help='path to a file containing the output arguments '
'as a JSON object.')
+ subp.add_argument('--json-output',
+ help='Write errors to json.output')
subp.set_defaults(func=self.CmdAnalyze)
subp = subps.add_parser('export',
@@ -149,6 +151,8 @@ class MetaBuildWrapper(object):
subp.add_argument('--swarming-targets-file',
help='save runtime dependencies for targets listed '
'in file.')
+ subp.add_argument('--json-output',
+ help='Write errors to json.output')
subp.add_argument('path', nargs=1,
help='path to generate build into')
subp.set_defaults(func=self.CmdGen)
@@ -167,6 +171,12 @@ class MetaBuildWrapper(object):
help='look up the command for a given config or '
'builder')
AddCommonOptions(subp)
+ subp.add_argument('--quiet', default=False, action='store_true',
+ help='Print out just the arguments, '
+ 'do not emulate the output of the gen subcommand.')
+ subp.add_argument('--recursive', default=False, action='store_true',
+ help='Lookup arguments from imported files, '
+ 'implies --quiet')
subp.set_defaults(func=self.CmdLookup)
subp = subps.add_parser(
@@ -307,12 +317,15 @@ class MetaBuildWrapper(object):
def CmdLookup(self):
vals = self.Lookup()
- cmd = self.GNCmd('gen', '_path_')
- gn_args = self.GNArgs(vals)
- self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
- env = None
+ gn_args = self.GNArgs(vals, expand_imports=self.args.recursive)
+ if self.args.quiet or self.args.recursive:
+ self.Print(gn_args, end='')
+ else:
+ cmd = self.GNCmd('gen', '_path_')
+ self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
+ env = None
- self.PrintCmd(cmd, env)
+ self.PrintCmd(cmd, env)
return 0
def CmdRun(self):
@@ -702,8 +715,11 @@ class MetaBuildWrapper(object):
self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n')
cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
- ret, _, _ = self.Run(cmd)
+ ret, output, _ = self.Run(cmd)
if ret:
+ if self.args.json_output:
+ # write errors to json.output
+ self.WriteJSON({'output': output}, self.args.json_output)
# If `gn gen` failed, we should exit early rather than trying to
# generate isolates. Run() will have already logged any error output.
self.Print('GN gen failed: %d' % ret)
@@ -852,7 +868,7 @@ class MetaBuildWrapper(object):
return [gn_path, subcommand, path] + list(args)
- def GNArgs(self, vals):
+ def GNArgs(self, vals, expand_imports=False):
if vals['cros_passthrough']:
if not 'GN_ARGS' in os.environ:
raise MBErr('MB is expecting GN_ARGS to be in the environment')
@@ -874,15 +890,24 @@ class MetaBuildWrapper(object):
if android_version_name:
gn_args += ' android_default_version_name="%s"' % android_version_name
+ args_gn_lines = []
+ parsed_gn_args = {}
+
+ args_file = vals.get('args_file', None)
+ if args_file:
+ if expand_imports:
+ content = self.ReadFile(self.ToAbsPath(args_file))
+ parsed_gn_args = gn_helpers.FromGNArgs(content)
+ else:
+ args_gn_lines.append('import("%s")' % args_file)
+
# Canonicalize the arg string into a sorted, newline-separated list
# of key-value pairs, and de-dup the keys if need be so that only
# the last instance of each arg is listed.
- gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args))
+ parsed_gn_args.update(gn_helpers.FromGNArgs(gn_args))
+ args_gn_lines.append(gn_helpers.ToGNString(parsed_gn_args))
- args_file = vals.get('args_file', None)
- if args_file:
- gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
- return gn_args
+ return '\n'.join(args_gn_lines)
def ToAbsPath(self, build_path, *comps):
return self.PathJoin(self.chromium_src_dir,
@@ -949,8 +974,11 @@ class MetaBuildWrapper(object):
try:
self.WriteJSON(gn_inp, gn_input_path)
cmd = self.GNCmd('analyze', build_path, gn_input_path, gn_output_path)
- ret, _, _ = self.Run(cmd, force_verbose=True)
+ ret, output, _ = self.Run(cmd, force_verbose=True)
if ret:
+ if self.args.json_output:
+ # write errors to json.output
+ self.WriteJSON({'output': output}, self.args.json_output)
return ret
gn_outp_str = self.ReadFile(gn_output_path)
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
index 1889f18a3a..a22686a5ee 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -549,7 +549,18 @@ class UnitTest(unittest.TestCase):
'//out/Default', 'base_unittests'], mbw=mbw, ret=0)
def test_lookup(self):
- self.check(['lookup', '-c', 'debug_goma'], ret=0)
+ self.check(['lookup', '-c', 'debug_goma'], ret=0,
+ out=('\n'
+ 'Writing """\\\n'
+ 'is_debug = true\n'
+ 'use_goma = true\n'
+ '""" to _path_/args.gn.\n\n'
+ '/fake_src/buildtools/linux64/gn gen _path_\n'))
+
+ def test_quiet_lookup(self):
+ self.check(['lookup', '-c', 'debug_goma', '--quiet'], ret=0,
+ out=('is_debug = true\n'
+ 'use_goma = true\n'))
def test_lookup_goma_dir_expansion(self):
self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0,
@@ -597,6 +608,19 @@ class UnitTest(unittest.TestCase):
'--phase', 'phase_2'], ret=0)
self.assertIn('phase = 2', mbw.out)
+ def test_recursive_lookup(self):
+ files = {
+ '/fake_src/build/args/fake.gn': (
+ 'enable_doom_melon = true\n'
+ 'enable_antidoom_banana = true\n'
+ )
+ }
+ self.check(['lookup', '-m', 'fake_master', '-b', 'fake_args_file',
+ '--recursive'], files=files, ret=0,
+ out=('enable_antidoom_banana = true\n'
+ 'enable_doom_melon = true\n'
+ 'use_goma = true\n'))
+
def test_validate(self):
mbw = self.fake_mbw()
self.check(['validate'], mbw=mbw, ret=0)
diff --git a/deps/v8/tools/node/build_gn.py b/deps/v8/tools/node/build_gn.py
deleted file mode 100755
index 83071adbfe..0000000000
--- a/deps/v8/tools/node/build_gn.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Use this script to build libv8_monolith.a as dependency for Node.js
-Required dependencies can be fetched with fetch_deps.py.
-
-Usage: build_gn.py <Debug/Release> <v8-path> <build-path> [<build-flags>]...
-
-Build flags are passed either as "strings" or numeric value. True/false
-are represented as 1/0. E.g.
-
- v8_promise_internal_field_count=2
- target_cpu="x64"
- v8_enable_disassembler=0
-"""
-
-import argparse
-import os
-import subprocess
-import sys
-
-import node_common
-
-GN_ARGS = [
- "v8_monolithic=true",
- "is_component_build=false",
- "v8_use_external_startup_data=false",
- "use_custom_libcxx=false",
-]
-
-BUILD_TARGET = "v8_monolith"
-
-def FindTargetOs(flags):
- for flag in flags:
- if flag.startswith("target_os="):
- return flag[len("target_os="):].strip('"')
- raise Exception('No target_os was set.')
-
-def FindGn(options):
- if options.host_os == "linux":
- os_path = "linux64"
- elif options.host_os == "mac":
- os_path = "mac"
- elif options.host_os == "win":
- os_path = "win"
- else:
- raise "Operating system not supported by GN"
- return os.path.join(options.v8_path, "buildtools", os_path, "gn")
-
-def GenerateBuildFiles(options):
- gn = FindGn(options)
- gn_args = list(GN_ARGS)
- target_os = FindTargetOs(options.flag)
- if target_os != "win":
- gn_args.append("use_sysroot=false")
-
- for flag in options.flag:
- flag = flag.replace("=1", "=true")
- flag = flag.replace("=0", "=false")
- flag = flag.replace("target_cpu=ia32", "target_cpu=\"x86\"")
- gn_args.append(flag)
- if options.mode == "Debug":
- gn_args.append("is_debug=true")
- else:
- gn_args.append("is_debug=false")
-
- flattened_args = ' '.join(gn_args)
- if options.extra_gn_args:
- flattened_args += ' ' + options.extra_gn_args
-
- args = [gn, "gen", options.build_path, "-q", "--args=" + flattened_args]
- subprocess.check_call(args)
-
-def Build(options):
- depot_tools = node_common.EnsureDepotTools(options.v8_path, False)
- ninja = os.path.join(depot_tools, "ninja")
- if sys.platform == 'win32':
- # Required because there is an extension-less file called "ninja".
- ninja += ".exe"
- args = [ninja, "-C", options.build_path, BUILD_TARGET]
- if options.max_load:
- args += ["-l" + options.max_load]
- if options.max_jobs:
- args += ["-j" + options.max_jobs]
- else:
- with open(os.path.join(options.build_path, "args.gn")) as f:
- if "use_goma = true" in f.read():
- args += ["-j500"]
- subprocess.check_call(args)
-
-def ParseOptions(args):
- parser = argparse.ArgumentParser(
- description="Build %s with GN" % BUILD_TARGET)
- parser.add_argument("--mode", help="Build mode (Release/Debug)")
- parser.add_argument("--v8_path", help="Path to V8", required=True)
- parser.add_argument("--build_path", help="Path to build result",
- required=True)
- parser.add_argument("--flag", help="Translate GYP flag to GN",
- action="append")
- parser.add_argument("--host_os", help="Current operating system")
- parser.add_argument("--bundled-win-toolchain",
- help="Value for DEPOT_TOOLS_WIN_TOOLCHAIN")
- parser.add_argument("--bundled-win-toolchain-root",
- help="Value for DEPOT_TOOLS_WIN_TOOLCHAIN_ROOT")
- parser.add_argument("--depot-tools", help="Absolute path to depot_tools")
- parser.add_argument("--extra-gn-args", help="Additional GN args")
- parser.add_argument("--build", help="Run ninja as opposed to gn gen.",
- action="store_true")
- parser.add_argument("--max-jobs", help="ninja's -j parameter")
- parser.add_argument("--max-load", help="ninja's -l parameter")
- options = parser.parse_args(args)
-
- options.build_path = os.path.abspath(options.build_path)
-
- if not options.build:
- assert options.host_os
- assert options.mode == "Debug" or options.mode == "Release"
-
- options.v8_path = os.path.abspath(options.v8_path)
- assert os.path.isdir(options.v8_path)
-
- return options
-
-
-if __name__ == "__main__":
- options = ParseOptions(sys.argv[1:])
- # Build can result in running gn gen, so need to set environment variables
- # for build as well as generate.
- if options.bundled_win_toolchain:
- os.environ['DEPOT_TOOLS_WIN_TOOLCHAIN'] = options.bundled_win_toolchain
- if options.bundled_win_toolchain_root:
- os.environ['DEPOT_TOOLS_WIN_TOOLCHAIN_ROOT'] = (
- options.bundled_win_toolchain_root)
- if options.depot_tools:
- os.environ['PATH'] = (
- options.depot_tools + os.path.pathsep + os.environ['PATH'])
- if not options.build:
- GenerateBuildFiles(options)
- else:
- Build(options)
diff --git a/deps/v8/tools/node/test_update_node.py b/deps/v8/tools/node/test_update_node.py
deleted file mode 100755
index 785517b8c8..0000000000
--- a/deps/v8/tools/node/test_update_node.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-import unittest
-
-import update_node
-
-# Base paths.
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-TEST_DATA = os.path.join(BASE_DIR, 'testdata')
-
-# Expectations.
-EXPECTED_GITIGNORE = """
-/third_party/googletest/*
-!/third_party/googletest/src
-/third_party/googletest/src/*
-!/third_party/googletest/src/googletest
-/third_party/googletest/src/googletest/*
-!/third_party/googletest/src/googletest/include
-/third_party/googletest/src/googletest/include/*
-!/third_party/googletest/src/googletest/include/gtest
-/third_party/googletest/src/googletest/include/gtest/*
-!/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
-!/third_party/jinja2
-!/third_party/markupsafe
-/unrelated
-"""
-
-EXPECTED_GIT_DIFF = """
- create mode 100644 deps/v8/base/trace_event/common/common
- rename deps/v8/baz/{delete_me => v8_new} (100%)
- delete mode 100644 deps/v8/include/v8-version.h
- rename deps/v8/{delete_me => new/v8_new} (100%)
- create mode 100644 deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
- create mode 100644 deps/v8/third_party/jinja2/jinja2
- create mode 100644 deps/v8/third_party/markupsafe/markupsafe
- create mode 100644 deps/v8/v8_new
-"""
-
-ADDED_FILES = [
- 'v8_new',
- 'new/v8_new',
- 'baz/v8_new',
- '/third_party/googletest/src/googletest/include/gtest/gtest_new',
- '/third_party/googletest/src/googletest/include/gtest/new/gtest_new',
- '/third_party/googletest/src/googletest/include/gtest/baz/gtest_new',
- 'third_party/jinja2/jinja2',
- 'third_party/markupsafe/markupsafe'
-]
-
-REMOVED_FILES = [
- 'delete_me',
- 'baz/delete_me',
- 'testing/gtest/delete_me',
- 'testing/gtest/baz/delete_me',
-]
-
-def gitify(path):
- files = os.listdir(path)
- subprocess.check_call(['git', 'init'], cwd=path)
- subprocess.check_call(['git', 'add'] + files, cwd=path)
- subprocess.check_call(['git', 'commit', '-m', 'Initial'], cwd=path)
-
-
-class TestUpdateNode(unittest.TestCase):
- def setUp(self):
- self.workdir = tempfile.mkdtemp(prefix='tmp_test_node_')
-
- def tearDown(self):
- shutil.rmtree(self.workdir)
-
- def testUpdate(self):
- v8_cwd = os.path.join(self.workdir, 'v8')
- node_cwd = os.path.join(self.workdir, 'node')
-
- # Set up V8 test fixture.
- shutil.copytree(src=os.path.join(TEST_DATA, 'v8'), dst=v8_cwd)
- gitify(v8_cwd)
- for repository in update_node.SUB_REPOSITORIES:
- gitify(os.path.join(v8_cwd, *repository))
-
- # Set up node test fixture.
- shutil.copytree(src=os.path.join(TEST_DATA, 'node'), dst=node_cwd)
- gitify(os.path.join(node_cwd))
-
- # Add a patch.
- with open(os.path.join(v8_cwd, 'v8_foo'), 'w') as f:
- f.write('zonk')
- subprocess.check_call(['git', 'add', 'v8_foo'], cwd=v8_cwd)
-
- # Run update script.
- update_node.Main([v8_cwd, node_cwd, "--commit", "--with-patch"])
-
- # Check expectations.
- with open(os.path.join(node_cwd, 'deps', 'v8', '.gitignore')) as f:
- actual_gitignore = f.read()
- self.assertEquals(EXPECTED_GITIGNORE.strip(), actual_gitignore.strip())
- for f in ADDED_FILES:
- added_file = os.path.join(node_cwd, 'deps', 'v8', *f.split('/'))
- self.assertTrue(os.path.exists(added_file))
- for f in REMOVED_FILES:
- removed_file = os.path.join(node_cwd, 'deps', 'v8', *f.split('/'))
- self.assertFalse(os.path.exists(removed_file))
- gitlog = subprocess.check_output(
- ['git', 'diff', 'master', '--summary'],
- cwd=node_cwd,
- )
- self.assertEquals(EXPECTED_GIT_DIFF.strip(), gitlog.strip())
-
- # Check patch.
- gitlog = subprocess.check_output(
- ['git', 'diff', 'master', '--cached', '--', 'deps/v8/v8_foo'],
- cwd=node_cwd,
- )
- self.assertIn('+zonk', gitlog.strip())
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/.gitignore b/deps/v8/tools/node/testdata/node/deps/v8/.gitignore
deleted file mode 100644
index 23c2024827..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-/unrelated
-/testing/gtest/*
-!/testing/gtest/include
-/testing/gtest/include/*
-!/testing/gtest/include/gtest
-/testing/gtest/include/gtest/*
-!/testing/gtest/include/gtest/gtest_prod.h
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me b/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo b/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/delete_me b/deps/v8/tools/node/testdata/node/deps/v8/delete_me
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/delete_me
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h b/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h
deleted file mode 100644
index fe8b2712e3..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INCLUDE_VERSION_H_ // V8_VERSION_H_ conflicts with src/version.h
-#define V8_INCLUDE_VERSION_H_
-
-// These macros define the version number for the current version.
-// NOTE these macros are used by some of the tool scripts and the build
-// system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 1
-#define V8_MINOR_VERSION 2
-#define V8_BUILD_NUMBER 3
-#define V8_PATCH_LEVEL 4321
-
-// Use 1 for candidates and 0 otherwise.
-// (Boolean macro values are not supported by all preprocessors.)
-#define V8_IS_CANDIDATE_VERSION 0
-
-#endif // V8_INCLUDE_VERSION_H_
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/v8_foo b/deps/v8/tools/node/testdata/node/deps/v8/v8_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/v8_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/.gitignore b/deps/v8/tools/node/testdata/v8/.gitignore
deleted file mode 100644
index cc2f1ca202..0000000000
--- a/deps/v8/tools/node/testdata/v8/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/unrelated
-/third_party/jinja2
-/third_party/markupsafe
diff --git a/deps/v8/tools/node/testdata/v8/base/trace_event/common/common b/deps/v8/tools/node/testdata/v8/base/trace_event/common/common
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/tools/node/testdata/v8/base/trace_event/common/common
+++ /dev/null
diff --git a/deps/v8/tools/node/testdata/v8/baz/v8_foo b/deps/v8/tools/node/testdata/v8/baz/v8_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/baz/v8_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/baz/v8_new b/deps/v8/tools/node/testdata/v8/baz/v8_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/baz/v8_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/new/v8_new b/deps/v8/tools/node/testdata/v8/new/v8_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/new/v8_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo b/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new b/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_bar b/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_bar
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_bar
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_new b/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new b/deps/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/v8_foo b/deps/v8/tools/node/testdata/v8/v8_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/v8_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/v8_new b/deps/v8/tools/node/testdata/v8/v8_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/v8_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/update_node.py b/deps/v8/tools/node/update_node.py
deleted file mode 100755
index 2ebf799c5e..0000000000
--- a/deps/v8/tools/node/update_node.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Use this script to update V8 in a Node.js checkout.
-
-Requirements:
- - Node.js checkout in which V8 should be updated.
- - V8 checkout at the commit to which Node.js should be updated.
-
-Usage:
- $ update_node.py <path_to_v8> <path_to_node>
-
- This will synchronize the content of <path_to_node>/deps/v8 with <path_to_v8>,
- and a few V8 dependencies require in Node.js. It will also update .gitignore
- appropriately.
-
-Optional flags:
- --gclient Run `gclient sync` on the V8 checkout before updating.
- --commit Create commit with the updated V8 in the Node.js checkout.
- --with-patch Also include currently staged files in the V8 checkout.
-"""
-
-# for py2/py3 compatibility
-from __future__ import print_function
-
-import argparse
-import os
-import shutil
-import subprocess
-import sys
-import stat
-import node_common
-
-TARGET_SUBDIR = os.path.join("deps", "v8")
-
-SUB_REPOSITORIES = [ ["base", "trace_event", "common"],
- ["third_party", "googletest", "src"] ]
-
-DELETE_FROM_GITIGNORE = [ "/base",
- "/third_party/googletest/src" ]
-
-# Node.js requires only a single header file from gtest to build V8.
-ADD_TO_GITIGNORE = [ "/third_party/googletest/*",
- "!/third_party/googletest/BUILD.gn",
- "!/third_party/googletest/src",
- "/third_party/googletest/src/*",
- "!/third_party/googletest/src/googletest",
- "/third_party/googletest/src/googletest/*",
- "!/third_party/googletest/src/googletest/include",
- "/third_party/googletest/src/googletest/include/*",
- "!/third_party/googletest/src/googletest/include/gtest",
- "/third_party/googletest/src/googletest/include/gtest/*",
- "!/third_party/googletest/src/googletest/include/gtest/gtest_prod.h" ]
-
-# Node.js owns deps/v8/gypfiles in their downstream repository.
-FILES_TO_KEEP = [ "gypfiles" ]
-
-def RunGclient(path):
- assert os.path.isdir(path)
- print(">> Running gclient sync")
- subprocess.check_call(["gclient", "sync", "--nohooks"], cwd=path)
-
-def CommitPatch(options):
- """Makes a dummy commit for the changes in the index.
-
- On trybots, bot_updated applies the patch to the index. We commit it to make
- the fake git clone fetch it into node.js. We can leave the commit, as
- bot_update will ensure a clean state on each run.
- """
- print(">> Committing patch")
- subprocess.check_call(
- ["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
- "commit", "--allow-empty", "-m", "placeholder-commit"],
- cwd=options.v8_path,
- )
-
-def UpdateTarget(repository, options, files_to_keep):
- source = os.path.join(options.v8_path, *repository)
- target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
- print(">> Updating target directory %s" % target)
- print(">> from active branch at %s" % source)
- if not os.path.exists(target):
- os.makedirs(target)
- # Remove possible remnants of previous incomplete runs.
- node_common.UninitGit(target)
-
- git_args = []
- git_args.append(["init"]) # initialize target repo
-
- if files_to_keep:
- git_args.append(["add"] + files_to_keep) # add and commit
- git_args.append(["commit", "-m", "keep files"]) # files we want to keep
-
- git_args.append(["clean", "-fxd"]) # nuke everything else
- git_args.append(["remote", "add", "source", source]) # point to source repo
- git_args.append(["fetch", "source", "HEAD"]) # sync to current branch
- git_args.append(["checkout", "-f", "FETCH_HEAD"]) # switch to that branch
- git_args.append(["clean", "-fxd"]) # delete removed files
-
- if files_to_keep:
- git_args.append(["cherry-pick", "master"]) # restore kept files
-
- try:
- for args in git_args:
- subprocess.check_call(["git"] + args, cwd=target)
- except:
- raise
- finally:
- node_common.UninitGit(target)
-
-def UpdateGitIgnore(options):
- file_name = os.path.join(options.node_path, TARGET_SUBDIR, ".gitignore")
- assert os.path.isfile(file_name)
- print(">> Updating .gitignore with lines")
- with open(file_name) as gitignore:
- content = gitignore.readlines()
- content = [x.strip() for x in content]
- for x in DELETE_FROM_GITIGNORE:
- if x in content:
- print("- %s" % x)
- content.remove(x)
- for x in ADD_TO_GITIGNORE:
- if x not in content:
- print("+ %s" % x)
- content.append(x)
- content.sort(key=lambda x: x[1:] if x.startswith("!") else x)
- with open(file_name, "w") as gitignore:
- for x in content:
- gitignore.write("%s\n" % x)
-
-def CreateCommit(options):
- print(">> Creating commit.")
- # Find git hash from source.
- githash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"],
- cwd=options.v8_path).strip()
- # Create commit at target.
- git_commands = [
- ["git", "checkout", "-b", "update_v8_to_%s" % githash], # new branch
- ["git", "add", "."], # add files
- ["git", "commit", "-m", "Update V8 to %s" % githash] # new commit
- ]
- for command in git_commands:
- subprocess.check_call(command, cwd=options.node_path)
-
-def ParseOptions(args):
- parser = argparse.ArgumentParser(description="Update V8 in Node.js")
- parser.add_argument("v8_path", help="Path to V8 checkout")
- parser.add_argument("node_path", help="Path to Node.js checkout")
- parser.add_argument("--gclient", action="store_true", help="Run gclient sync")
- parser.add_argument("--commit", action="store_true", help="Create commit")
- parser.add_argument("--with-patch", action="store_true",
- help="Apply also staged files")
- options = parser.parse_args(args)
- assert os.path.isdir(options.v8_path)
- options.v8_path = os.path.abspath(options.v8_path)
- assert os.path.isdir(options.node_path)
- options.node_path = os.path.abspath(options.node_path)
- return options
-
-def Main(args):
- options = ParseOptions(args)
- if options.gclient:
- RunGclient(options.v8_path)
- # Commit patch on trybots to main V8 repository.
- if options.with_patch:
- CommitPatch(options)
- # Update main V8 repository.
- UpdateTarget([""], options, FILES_TO_KEEP)
- # Patch .gitignore before updating sub-repositories.
- UpdateGitIgnore(options)
- for repo in SUB_REPOSITORIES:
- UpdateTarget(repo, options, None)
- if options.commit:
- CreateCommit(options)
-
-if __name__ == "__main__":
- Main(sys.argv[1:])
diff --git a/deps/v8/tools/profviz/worker.js b/deps/v8/tools/profviz/worker.js
index 7f163088e4..95ed40b89b 100644
--- a/deps/v8/tools/profviz/worker.js
+++ b/deps/v8/tools/profviz/worker.js
@@ -100,7 +100,7 @@ function run(args) {
var profile = "";
print = function(text) { profile += text + "\n"; };
// Dummy entries provider, as we cannot call nm.
- var entriesProvider = new UnixCppEntriesProvider("", "");
+ var entriesProvider = new UnixCppEntriesProvider("", "", "");
var targetRootFS = "";
var separateIc = false;
var callGraphSize = 5;
diff --git a/deps/v8/tools/run-wasm-api-tests.py b/deps/v8/tools/run-wasm-api-tests.py
index 46e13d3255..79f53cb927 100644..100755
--- a/deps/v8/tools/run-wasm-api-tests.py
+++ b/deps/v8/tools/run-wasm-api-tests.py
@@ -30,7 +30,7 @@ import shutil
import subprocess
import sys
-CFLAGS = "-DDEBUG -Wall -Werror -O0 -fsanitize=address"
+CFLAGS = "-DDEBUG -Wall -Werror -O0 -ggdb -fsanitize=address"
CHECKOUT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
WASM_PATH = os.path.join(CHECKOUT_PATH, "third_party", "wasm-api")
@@ -87,12 +87,12 @@ class Runner(object):
dst_wasm_file = self.dst_file_basename + ".wasm"
shutil.copyfile(src_wasm_file, dst_wasm_file)
- def _Error(self, step, lang, compiler):
+ def _Error(self, step, lang, compiler, code):
print("Error: %s failed. To repro: tools/run-wasm-api-tests.py "
"%s %s %s %s %s" %
(step, self.outdir, self.tempdir, self.name, lang,
compiler["name"].lower()))
-
+ return code
def CompileAndRun(self, compiler, language):
print("==== %s %s/%s ====" %
@@ -104,15 +104,15 @@ class Runner(object):
# Compile.
c = _Call([compiler[lang], "-c", language["cflags"], CFLAGS,
"-I", WASM_PATH, "-o", obj_file, src_file])
- if c: return self._Error("compilation", lang, compiler)
+ if c: return self._Error("compilation", lang, compiler, c)
# Link.
c = _Call([compiler["cc"], CFLAGS, compiler["ldflags"], obj_file,
"-o", exe_file, self.lib_file, "-ldl -pthread"])
- if c: return self._Error("linking", lang, compiler)
+ if c: return self._Error("linking", lang, compiler, c)
# Execute.
exe_file = "./%s-%s" % (self.name, lang)
c = _Call(["cd", self.tempdir, ";", exe_file])
- if c: return self._Error("execution", lang, compiler)
+ if c: return self._Error("execution", lang, compiler, c)
return 0
def Main(args):
@@ -157,6 +157,10 @@ def Main(args):
for language in languages:
c = runner.CompileAndRun(compiler, language)
if c: result = c
+ if result:
+ print("\nFinished with errors.")
+ else:
+ print("\nFinished successfully.")
return result
if __name__ == "__main__":
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 9e05be99e5..419cc47847 100755..100644
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -108,27 +107,30 @@ from __future__ import print_function
from functools import reduce
from collections import OrderedDict
-import datetime
+import copy
import json
import logging
import math
-import optparse
+import argparse
import os
import re
import subprocess
import sys
+import time
import traceback
+import numpy
+
from testrunner.local import android
from testrunner.local import command
from testrunner.local import utils
+from testrunner.objects.output import Output, NULL_OUTPUT
try:
basestring # Python 2
except NameError: # Python 3
basestring = str
-ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ['arm',
'ia32',
'mips',
@@ -141,6 +143,7 @@ RESULT_STDDEV_RE = re.compile(r'^\{([^\}]+)\}$')
RESULT_LIST_RE = re.compile(r'^\[([^\]]+)\]$')
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
INFRA_FAILURE_RETCODE = 87
+MIN_RUNS_FOR_CONFIDENCE = 10
def GeometricMean(values):
@@ -149,116 +152,130 @@ def GeometricMean(values):
The mean is calculated using log to avoid overflow.
"""
values = map(float, values)
- return str(math.exp(sum(map(math.log, values)) / len(values)))
-
-
-class TestFailedError(Exception):
- """Error raised when a test has failed due to a non-infra issue."""
- pass
-
-
-class Results(object):
- """Place holder for result traces."""
- def __init__(self, traces=None, errors=None):
- self.traces = traces or []
- self.errors = errors or []
- self.timeouts = []
- self.near_timeouts = [] # > 90% of the max runtime
+ return math.exp(sum(map(math.log, values)) / len(values))
+
+
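GeometricMean() now returns a float rather than a string, which the new total
computation below relies on. A quick standalone illustration of the log-based
formulation (independent of the script):

    import math

    def geometric_mean(values):
        # exp(mean(log(x))) avoids the overflow that multiplying many large
        # values directly could cause.
        values = list(map(float, values))
        return math.exp(sum(map(math.log, values)) / len(values))

    print(geometric_mean([10, 1000]))  # prints roughly 100.0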
+class ResultTracker(object):
+ """Class that tracks trace/runnable results and produces script output.
+
+ The output is structured like this:
+ {
+ "traces": [
+ {
+ "graphs": ["path", "to", "trace", "config"],
+ "units": <string describing units, e.g. "ms" or "KB">,
+ "results": [<list of values measured over several runs>],
+        "stddev": <stddev of the value if measured by the script or ''>
+ },
+ ...
+ ],
+ "runnables": [
+ {
+ "graphs": ["path", "to", "runnable", "config"],
+ "durations": [<list of durations of each runnable run in seconds>],
+ "timeout": <timeout configured for runnable in seconds>,
+ },
+ ...
+ ],
+ "errors": [<list of strings describing errors>],
+ }
+ """
+ def __init__(self):
+ self.traces = {}
+ self.errors = []
+ self.runnables = {}
+
+ def AddTraceResult(self, trace, result, stddev):
+ if trace.name not in self.traces:
+ self.traces[trace.name] = {
+ 'graphs': trace.graphs,
+ 'units': trace.units,
+ 'results': [result],
+ 'stddev': stddev or '',
+ }
+ else:
+ existing_entry = self.traces[trace.name]
+ assert trace.graphs == existing_entry['graphs']
+ assert trace.units == existing_entry['units']
+ if stddev:
+ existing_entry['stddev'] = stddev
+ existing_entry['results'].append(result)
+
+ def TraceHasStdDev(self, trace):
+ return trace.name in self.traces and self.traces[trace.name]['stddev'] != ''
+
+ def AddError(self, error):
+ self.errors.append(error)
+
+ def AddRunnableDuration(self, runnable, duration):
+ """Records a duration of a specific run of the runnable."""
+ if runnable.name not in self.runnables:
+ self.runnables[runnable.name] = {
+ 'graphs': runnable.graphs,
+ 'durations': [duration],
+ 'timeout': runnable.timeout,
+ }
+ else:
+ existing_entry = self.runnables[runnable.name]
+ assert runnable.timeout == existing_entry['timeout']
+ assert runnable.graphs == existing_entry['graphs']
+ existing_entry['durations'].append(duration)
def ToDict(self):
return {
- 'traces': self.traces,
+ 'traces': self.traces.values(),
'errors': self.errors,
- 'timeouts': self.timeouts,
- 'near_timeouts': self.near_timeouts,
+ 'runnables': self.runnables.values(),
}
def WriteToFile(self, file_name):
with open(file_name, 'w') as f:
f.write(json.dumps(self.ToDict()))
- def __add__(self, other):
- self.traces += other.traces
- self.errors += other.errors
- self.timeouts += other.timeouts
- self.near_timeouts += other.near_timeouts
- return self
-
- def __str__(self): # pragma: no cover
- return str(self.ToDict())
-
-
-class Measurement(object):
- """Represents a series of results of one trace.
-
- The results are from repetitive runs of the same executable. They are
- gathered by repeated calls to ConsumeOutput.
- """
- def __init__(self, graphs, units, results_regexp, stddev_regexp):
- self.name = '/'.join(graphs)
- self.graphs = graphs
- self.units = units
- self.results_regexp = results_regexp
- self.stddev_regexp = stddev_regexp
- self.results = []
- self.errors = []
- self.stddev = ''
- self.process_size = False
-
- def ConsumeOutput(self, stdout):
- try:
- result = re.search(self.results_regexp, stdout, re.M).group(1)
- self.results.append(str(float(result)))
- except ValueError:
- self.errors.append('Regexp "%s" returned a non-numeric for test %s.'
- % (self.results_regexp, self.name))
- except:
- self.errors.append('Regexp "%s" did not match for test %s.'
- % (self.results_regexp, self.name))
-
- try:
- if self.stddev_regexp and self.stddev:
- self.errors.append('Test %s should only run once since a stddev '
- 'is provided by the test.' % self.name)
- if self.stddev_regexp:
- self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
- except:
- self.errors.append('Regexp "%s" did not match for test %s.'
- % (self.stddev_regexp, self.name))
+ def HasEnoughRuns(self, graph_config, confidence_level):
+ """Checks if the mean of the results for a given trace config is within
+ 0.1% of the true value with the specified confidence level.
- def GetResults(self):
- return Results([{
- 'graphs': self.graphs,
- 'units': self.units,
- 'results': self.results,
- 'stddev': self.stddev,
- }], self.errors)
+    This assumes a Gaussian distribution of the noise and is based on
+ https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule.
+ Args:
+ graph_config: An instance of GraphConfig.
+ confidence_level: Number of standard deviations from the mean that all
+ values must lie within. Typical values are 1, 2 and 3 and correspond
+ to 68%, 95% and 99.7% probability that the measured value is within
+ 0.1% of the true value.
+
+ Returns:
+      True if the specified confidence level has been achieved.
+ """
+ if not isinstance(graph_config, TraceConfig):
+ return all(self.HasEnoughRuns(child, confidence_level)
+ for child in graph_config.children)
-class NullMeasurement(object):
- """Null object to avoid having extra logic for configurations that don't
- require secondary run, e.g. CI bots.
- """
- def ConsumeOutput(self, stdout):
- pass
+ trace = self.traces.get(graph_config.name, {})
+ results = trace.get('results', [])
+ logging.debug('HasEnoughRuns for %s', graph_config.name)
- def GetResults(self):
- return Results()
+ if len(results) < MIN_RUNS_FOR_CONFIDENCE:
+ logging.debug(' Ran %d times, need at least %d',
+ len(results), MIN_RUNS_FOR_CONFIDENCE)
+ return False
+ logging.debug(' Results: %d entries', len(results))
+ mean = numpy.mean(results)
+ mean_stderr = numpy.std(results) / numpy.sqrt(len(results))
+ logging.debug(' Mean: %.2f, mean_stderr: %.2f', mean, mean_stderr)
+ return confidence_level * mean_stderr < mean / 1000.0
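The stopping rule above compares the standard error of the mean against 0.1% of the
mean itself. A self-contained sketch of the same criterion, using the thresholds
hard-coded in the snippet:

    import numpy

    MIN_RUNS_FOR_CONFIDENCE = 10

    def has_enough_runs(results, confidence_level):
        # A minimal sample is needed before the standard error is meaningful.
        if len(results) < MIN_RUNS_FOR_CONFIDENCE:
            return False
        mean = numpy.mean(results)
        mean_stderr = numpy.std(results) / numpy.sqrt(len(results))
        # Stop once confidence_level standard errors fit inside 0.1% of the mean.
        return confidence_level * mean_stderr < mean / 1000.0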
-def Unzip(iterable):
- left = []
- right = []
- for l, r in iterable:
- left.append(l)
- right.append(r)
- return lambda: iter(left), lambda: iter(right)
+ def __str__(self): # pragma: no cover
+ return json.dumps(self.ToDict(), indent=2, separators=(',', ': '))
-def RunResultsProcessor(results_processor, stdout, count):
+def RunResultsProcessor(results_processor, output, count):
# Dummy pass through for null-runs.
- if stdout is None:
- return None
+ if output.stdout is None:
+ return output
# We assume the results processor is relative to the suite.
assert os.path.exists(results_processor)
@@ -268,112 +285,10 @@ def RunResultsProcessor(results_processor, stdout, count):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
- result, _ = p.communicate(input=stdout)
- logging.info('>>> Processed stdout (#%d):\n%s', count, result)
- return result
-
-
-def AccumulateResults(
- graph_names, trace_configs, iter_output, perform_measurement, calc_total):
- """Iterates over the output of multiple benchmark reruns and accumulates
- results for a configured list of traces.
-
- Args:
- graph_names: List of names that configure the base path of the traces. E.g.
- ['v8', 'Octane'].
- trace_configs: List of 'TraceConfig' instances. Each trace config defines
- how to perform a measurement.
- iter_output: Iterator over the standard output of each test run.
- perform_measurement: Whether to actually run tests and perform measurements.
- This is needed so that we reuse this script for both CI
- and trybot, but want to ignore second run on CI without
- having to spread this logic throughout the script.
- calc_total: Boolean flag to speficy the calculation of a summary trace.
- Returns: A 'Results' object.
- """
- measurements = [
- trace.CreateMeasurement(perform_measurement) for trace in trace_configs]
- for stdout in iter_output():
- for measurement in measurements:
- measurement.ConsumeOutput(stdout)
-
- res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())
-
- if not res.traces or not calc_total:
- return res
-
- # Assume all traces have the same structure.
- if len(set(map(lambda t: len(t['results']), res.traces))) != 1:
- res.errors.append('Not all traces have the same number of results.')
- return res
-
- # Calculate the geometric means for all traces. Above we made sure that
- # there is at least one trace and that the number of results is the same
- # for each trace.
- n_results = len(res.traces[0]['results'])
- total_results = [GeometricMean(t['results'][i] for t in res.traces)
- for i in range(0, n_results)]
- res.traces.append({
- 'graphs': graph_names + ['Total'],
- 'units': res.traces[0]['units'],
- 'results': total_results,
- 'stddev': '',
- })
- return res
-
-
-def AccumulateGenericResults(graph_names, suite_units, iter_output):
- """Iterates over the output of multiple benchmark reruns and accumulates
- generic results.
-
- Args:
- graph_names: List of names that configure the base path of the traces. E.g.
- ['v8', 'Octane'].
- suite_units: Measurement default units as defined by the benchmark suite.
- iter_output: Iterator over the standard output of each test run.
- Returns: A 'Results' object.
- """
- traces = OrderedDict()
- for stdout in iter_output():
- if stdout is None:
- # The None value is used as a null object to simplify logic.
- continue
- for line in stdout.strip().splitlines():
- match = GENERIC_RESULTS_RE.match(line)
- if match:
- stddev = ''
- graph = match.group(1)
- trace = match.group(2)
- body = match.group(3)
- units = match.group(4)
- match_stddev = RESULT_STDDEV_RE.match(body)
- match_list = RESULT_LIST_RE.match(body)
- errors = []
- if match_stddev:
- result, stddev = map(str.strip, match_stddev.group(1).split(','))
- results = [result]
- elif match_list:
- results = map(str.strip, match_list.group(1).split(','))
- else:
- results = [body.strip()]
-
- try:
- results = map(lambda r: str(float(r)), results)
- except ValueError:
- results = []
- errors = ['Found non-numeric in %s' %
- '/'.join(graph_names + [graph, trace])]
-
- trace_result = traces.setdefault(trace, Results([{
- 'graphs': graph_names + [graph, trace],
- 'units': (units or suite_units).strip(),
- 'results': [],
- 'stddev': '',
- }], errors))
- trace_result.traces[0]['results'].extend(results)
- trace_result.traces[0]['stddev'] = stddev
-
- return reduce(lambda r, t: r + t, traces.itervalues(), Results())
+ new_output = copy.copy(output)
+ new_output.stdout, _ = p.communicate(input=output.stdout)
+ logging.info('>>> Processed stdout (#%d):\n%s', count, output.stdout)
+ return new_output
class Node(object):
@@ -384,6 +299,10 @@ class Node(object):
def AppendChild(self, child):
self._children.append(child)
+ @property
+ def children(self):
+ return self._children
+
class DefaultSentinel(Node):
"""Fake parent node with all default values."""
@@ -392,7 +311,7 @@ class DefaultSentinel(Node):
self.binary = binary
self.run_count = 10
self.timeout = 60
- self.retry_count = 0
+ self.retry_count = 4
self.path = []
self.graphs = []
self.flags = []
@@ -465,6 +384,10 @@ class GraphConfig(Node):
stddev_default = None
self.stddev_regexp = suite.get('stddev_regexp', stddev_default)
+ @property
+ def name(self):
+ return '/'.join(self.graphs)
+
class TraceConfig(GraphConfig):
"""Represents a leaf in the suite tree structure."""
@@ -473,16 +396,46 @@ class TraceConfig(GraphConfig):
assert self.results_regexp
assert self.owners
- def CreateMeasurement(self, perform_measurement):
- if not perform_measurement:
- return NullMeasurement()
+ def ConsumeOutput(self, output, result_tracker):
+ """Extracts trace results from the output.
+
+ Args:
+ output: Output object from the test run.
+ result_tracker: Result tracker to be updated.
+
+ Returns:
+ The raw extracted result value or None if an error occurred.
+ """
+ result = None
+ stddev = None
+
+ try:
+ result = float(
+ re.search(self.results_regexp, output.stdout, re.M).group(1))
+ except ValueError:
+ result_tracker.AddError(
+ 'Regexp "%s" returned a non-numeric for test %s.' %
+ (self.results_regexp, self.name))
+ except:
+ result_tracker.AddError(
+ 'Regexp "%s" did not match for test %s.' %
+ (self.results_regexp, self.name))
- return Measurement(
- self.graphs,
- self.units,
- self.results_regexp,
- self.stddev_regexp,
- )
+ try:
+ if self.stddev_regexp:
+ if result_tracker.TraceHasStdDev(self):
+ result_tracker.AddError(
+ 'Test %s should only run once since a stddev is provided by the '
+ 'test.' % self.name)
+ stddev = re.search(self.stddev_regexp, output.stdout, re.M).group(1)
+ except:
+ result_tracker.AddError(
+ 'Regexp "%s" did not match for test %s.' %
+ (self.stddev_regexp, self.name))
+
+ if result:
+ result_tracker.AddTraceResult(self, result, stddev)
+ return result
class RunnableConfig(GraphConfig):
@@ -490,22 +443,12 @@ class RunnableConfig(GraphConfig):
"""
def __init__(self, suite, parent, arch):
super(RunnableConfig, self).__init__(suite, parent, arch)
- self.has_timeouts = False
- self.has_near_timeouts = False
+ self.arch = arch
@property
def main(self):
return self._suite.get('main', '')
- def PostProcess(self, stdouts_iter):
- if self.results_processor:
- def it():
- for i, stdout in enumerate(stdouts_iter()):
- yield RunResultsProcessor(self.results_processor, stdout, i + 1)
- return it
- else:
- return stdouts_iter
-
def ChangeCWD(self, suite_path):
"""Changes the cwd to to path defined in the current graph.
@@ -537,25 +480,36 @@ class RunnableConfig(GraphConfig):
args=self.GetCommandFlags(extra_flags=extra_flags),
timeout=self.timeout or 60)
- def Run(self, runner, trybot):
- """Iterates over several runs and handles the output for all traces."""
- stdout, stdout_secondary = Unzip(runner())
- return (
- AccumulateResults(
- self.graphs,
- self._children,
- iter_output=self.PostProcess(stdout),
- perform_measurement=True,
- calc_total=self.total,
- ),
- AccumulateResults(
- self.graphs,
- self._children,
- iter_output=self.PostProcess(stdout_secondary),
- perform_measurement=trybot, # only run second time on trybots
- calc_total=self.total,
- ),
- )
+ def ProcessOutput(self, output, result_tracker, count):
+ """Processes test run output and updates result tracker.
+
+ Args:
+ output: Output object from the test run.
+ result_tracker: ResultTracker object to be updated.
+ count: Index of the test run (used for better logging).
+ """
+ if self.results_processor:
+ output = RunResultsProcessor(self.results_processor, output, count)
+
+ results_for_total = []
+ for trace in self.children:
+ result = trace.ConsumeOutput(output, result_tracker)
+ if result:
+ results_for_total.append(result)
+
+ if self.total:
+ # Produce total metric only when all traces have produced results.
+ if len(self.children) != len(results_for_total):
+ result_tracker.AddError(
+ 'Not all traces have produced results. Can not compute total for '
+ '%s.' % self.name)
+ return
+
+      # Calculate the total as the geometric mean of the results from all traces.
+ total_trace = TraceConfig(
+ {'name': 'Total', 'units': self.children[0].units}, self, self.arch)
+ result_tracker.AddTraceResult(
+ total_trace, GeometricMean(results_for_total), '')
class RunnableTraceConfig(TraceConfig, RunnableConfig):
@@ -563,30 +517,9 @@ class RunnableTraceConfig(TraceConfig, RunnableConfig):
def __init__(self, suite, parent, arch):
super(RunnableTraceConfig, self).__init__(suite, parent, arch)
- def Run(self, runner, trybot):
- """Iterates over several runs and handles the output."""
- measurement = self.CreateMeasurement(perform_measurement=True)
- measurement_secondary = self.CreateMeasurement(perform_measurement=trybot)
- for stdout, stdout_secondary in runner():
- measurement.ConsumeOutput(stdout)
- measurement_secondary.ConsumeOutput(stdout_secondary)
- return (
- measurement.GetResults(),
- measurement_secondary.GetResults(),
- )
-
-
-class RunnableGenericConfig(RunnableConfig):
- """Represents a runnable suite definition with generic traces."""
- def __init__(self, suite, parent, arch):
- super(RunnableGenericConfig, self).__init__(suite, parent, arch)
-
- def Run(self, runner, trybot):
- stdout, stdout_secondary = Unzip(runner())
- return (
- AccumulateGenericResults(self.graphs, self.units, stdout),
- AccumulateGenericResults(self.graphs, self.units, stdout_secondary),
- )
+ def ProcessOutput(self, output, result_tracker, count):
+ result_tracker.AddRunnableDuration(self, output.duration)
+ self.ConsumeOutput(output, result_tracker)
def MakeGraphConfig(suite, arch, parent):
@@ -602,10 +535,6 @@ def MakeGraphConfig(suite, arch, parent):
else:
# This graph has no subgraphs, it's a leaf.
return RunnableTraceConfig(suite, parent, arch)
- elif suite.get('generic'):
- # This is a generic suite definition. It is either a runnable executable
- # or has a main js file.
- return RunnableGenericConfig(suite, parent, arch)
elif suite.get('tests'):
# This is neither a leaf nor a runnable.
return GraphConfig(suite, parent, arch)
@@ -645,74 +574,85 @@ def FlattenRunnables(node, node_cb):
class Platform(object):
- def __init__(self, options):
- self.shell_dir = options.shell_dir
- self.shell_dir_secondary = options.shell_dir_secondary
- self.extra_flags = options.extra_flags.split()
- self.options = options
+ def __init__(self, args):
+ self.shell_dir = args.shell_dir
+ self.shell_dir_secondary = args.shell_dir_secondary
+ self.extra_flags = args.extra_flags.split()
+ self.args = args
@staticmethod
- def ReadBuildConfig(options):
- config_path = os.path.join(options.shell_dir, 'v8_build_config.json')
+ def ReadBuildConfig(args):
+ config_path = os.path.join(args.shell_dir, 'v8_build_config.json')
if not os.path.isfile(config_path):
return {}
with open(config_path) as f:
return json.load(f)
@staticmethod
- def GetPlatform(options):
- if Platform.ReadBuildConfig(options).get('is_android', False):
- return AndroidPlatform(options)
+ def GetPlatform(args):
+ if Platform.ReadBuildConfig(args).get('is_android', False):
+ return AndroidPlatform(args)
else:
- return DesktopPlatform(options)
+ return DesktopPlatform(args)
def _Run(self, runnable, count, secondary=False):
raise NotImplementedError() # pragma: no cover
- def _TimedRun(self, runnable, count, secondary=False):
- runnable_start_time = datetime.datetime.utcnow()
- stdout = self._Run(runnable, count, secondary)
- runnable_duration = datetime.datetime.utcnow() - runnable_start_time
- if runnable_duration.total_seconds() > 0.9 * runnable.timeout:
- runnable.has_near_timeouts = True
- return stdout
+ def _LoggedRun(self, runnable, count, secondary=False):
+ suffix = ' - secondary' if secondary else ''
+ title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
+ try:
+ output = self._Run(runnable, count, secondary)
+ except OSError:
+ logging.exception(title % 'OSError')
+ raise
+ if output.stdout:
+ logging.info(title % 'Stdout' + '\n%s', output.stdout)
+ if output.stderr: # pragma: no cover
+ # Print stderr for debugging.
+ logging.info(title % 'Stderr' + '\n%s', output.stderr)
+    if output.timed_out:
+      logging.warning('>>> Test timed out after %ss.', runnable.timeout)
+ if output.exit_code != 0:
+ logging.warning('>>> Test crashed with exit code %d.', output.exit_code)
+ return output
- def Run(self, runnable, count):
+ def Run(self, runnable, count, secondary):
"""Execute the benchmark's main file.
- If options.shell_dir_secondary is specified, the benchmark is run twice,
- e.g. with and without patch.
Args:
runnable: A Runnable benchmark instance.
count: The number of this (repeated) run.
- Returns: A tuple with the two benchmark outputs. The latter will be None if
- options.shell_dir_secondary was not specified.
+ secondary: True if secondary run should be executed.
+
+ Returns:
+ A tuple with the two benchmark outputs. The latter will be NULL_OUTPUT if
+ secondary is False.
"""
- stdout = self._TimedRun(runnable, count, secondary=False)
- if self.shell_dir_secondary:
- return stdout, self._TimedRun(runnable, count, secondary=True)
+ output = self._LoggedRun(runnable, count, secondary=False)
+ if secondary:
+ return output, self._LoggedRun(runnable, count, secondary=True)
else:
- return stdout, None
+ return output, NULL_OUTPUT
class DesktopPlatform(Platform):
- def __init__(self, options):
- super(DesktopPlatform, self).__init__(options)
+ def __init__(self, args):
+ super(DesktopPlatform, self).__init__(args)
self.command_prefix = []
# Setup command class to OS specific version.
- command.setup(utils.GuessOS(), options.device)
+ command.setup(utils.GuessOS(), args.device)
- if options.prioritize or options.affinitize != None:
+ if args.prioritize or args.affinitize != None:
self.command_prefix = ['schedtool']
- if options.prioritize:
+ if args.prioritize:
self.command_prefix += ['-n', '-20']
- if options.affinitize != None:
+ if args.affinitize != None:
# schedtool expects a bit pattern when setting affinity, where each
       # bit set to '1' corresponds to a core on which the process may run.
# First bit corresponds to CPU 0. Since the 'affinitize' parameter is
# a core number, we need to map to said bit pattern.
- cpu = int(options.affinitize)
+ cpu = int(args.affinitize)
core = 1 << cpu
self.command_prefix += ['-a', ('0x%x' % core)]
self.command_prefix += ['-e']
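Since schedtool takes an affinity bitmask rather than a core index, the code maps
core N to the bit pattern 1 << N. A tiny sketch of that mapping (mirroring the
snippet above):

    def affinity_args(cpu):
        # Core 3 -> bit 3 set -> 0x8, passed to schedtool as '-a 0x8'.
        core = 1 << int(cpu)
        return ['-a', '0x%x' % core]

    print(affinity_args(3))  # ['-a', '0x8']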
@@ -728,28 +668,11 @@ class DesktopPlatform(Platform):
node.ChangeCWD(path)
def _Run(self, runnable, count, secondary=False):
- suffix = ' - secondary' if secondary else ''
shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
- title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
- try:
- output = cmd.execute()
- except OSError: # pragma: no cover
- logging.exception(title % 'OSError')
- raise
+ output = cmd.execute()
- logging.info(title % 'Stdout' + '\n%s', output.stdout)
- if output.stderr: # pragma: no cover
- # Print stderr for debugging.
- logging.info(title % 'Stderr' + '\n%s', output.stderr)
- if output.timed_out:
- logging.warning('>>> Test timed out after %ss.', runnable.timeout)
- runnable.has_timeouts = True
- raise TestFailedError()
- if output.exit_code != 0:
- logging.warning('>>> Test crashed.')
- raise TestFailedError()
- if '--prof' in self.extra_flags:
+ if output.IsSuccess() and '--prof' in self.extra_flags:
os_prefix = {'linux': 'linux', 'macos': 'mac'}.get(utils.GuessOS())
if os_prefix:
tick_tools = os.path.join(TOOLS_BASE, '%s-tick-processor' % os_prefix)
@@ -758,17 +681,17 @@ class DesktopPlatform(Platform):
logging.warning(
'Profiler option currently supported on Linux and Mac OS.')
- # time outputs to stderr
+ # /usr/bin/time outputs to stderr
if runnable.process_size:
- return output.stdout + output.stderr
- return output.stdout
+ output.stdout += output.stderr
+ return output
class AndroidPlatform(Platform): # pragma: no cover
- def __init__(self, options):
- super(AndroidPlatform, self).__init__(options)
- self.driver = android.android_driver(options.device)
+ def __init__(self, args):
+ super(AndroidPlatform, self).__init__(args)
+ self.driver = android.android_driver(args.device)
def PreExecution(self):
self.driver.set_high_perf_mode()
@@ -799,9 +722,7 @@ class AndroidPlatform(Platform): # pragma: no cover
self.driver.push_file(bench_abs, resource, bench_rel)
def _Run(self, runnable, count, secondary=False):
- suffix = ' - secondary' if secondary else ''
target_dir = 'bin_secondary' if secondary else 'bin'
- title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
self.driver.drop_ram_caches()
# Relative path to benchmark directory.
@@ -811,15 +732,17 @@ class AndroidPlatform(Platform): # pragma: no cover
bench_rel = '.'
logcat_file = None
- if self.options.dump_logcats_to:
+ if self.args.dump_logcats_to:
runnable_name = '-'.join(runnable.graphs)
logcat_file = os.path.join(
- self.options.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
+ self.args.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
runnable_name, count + 1, '-secondary' if secondary else ''))
logging.debug('Dumping logcat into %s', logcat_file)
+ output = Output()
+ start = time.time()
try:
- stdout = self.driver.run(
+ output.stdout = self.driver.run(
target_dir=target_dir,
binary=runnable.binary,
args=runnable.GetCommandFlags(self.extra_flags),
@@ -827,20 +750,17 @@ class AndroidPlatform(Platform): # pragma: no cover
timeout=runnable.timeout,
logcat_file=logcat_file,
)
- logging.info(title % 'Stdout' + '\n%s', stdout)
except android.CommandFailedException as e:
- logging.info(title % 'Stdout' + '\n%s', e.output)
- logging.warning('>>> Test crashed.')
- raise TestFailedError()
+ output.stdout = e.output
+ output.exit_code = e.status
except android.TimeoutException as e:
- if e.output is not None:
- logging.info(title % 'Stdout' + '\n%s', e.output)
- logging.warning('>>> Test timed out after %ss.', runnable.timeout)
- runnable.has_timeouts = True
- raise TestFailedError()
+ output.stdout = e.output
+ output.timed_out = True
if runnable.process_size:
- return stdout + 'MaxMemory: Unsupported'
- return stdout
+ output.stdout += 'MaxMemory: Unsupported'
+ output.duration = time.time() - start
+ return output
+
class CustomMachineConfiguration:
def __init__(self, disable_aslr = False, governor = None):
@@ -946,146 +866,164 @@ class CustomMachineConfiguration:
raise Exception('Could not set CPU governor. Present value is %s'
% cur_value )
-def Main(args):
- parser = optparse.OptionParser()
- parser.add_option('--android-build-tools', help='Deprecated.')
- parser.add_option('--arch',
- help=('The architecture to run tests for, '
- '"auto" or "native" for auto-detect'),
- default='x64')
- parser.add_option('--buildbot',
- help='Adapt to path structure used on buildbots and adds '
- 'timestamps/level to all logged status messages',
- default=False, action='store_true')
- parser.add_option('-d', '--device',
- help='The device ID to run Android tests on. If not given '
- 'it will be autodetected.')
- parser.add_option('--extra-flags',
- help='Additional flags to pass to the test executable',
- default='')
- parser.add_option('--json-test-results',
- help='Path to a file for storing json results.')
- parser.add_option('--json-test-results-secondary',
- '--json-test-results-no-patch', # TODO(sergiyb): Deprecate.
- help='Path to a file for storing json results from run '
- 'without patch or for reference build run.')
- parser.add_option('--outdir', help='Base directory with compile output',
- default='out')
- parser.add_option('--outdir-secondary',
- '--outdir-no-patch', # TODO(sergiyb): Deprecate.
- help='Base directory with compile output without patch or '
- 'for reference build')
- parser.add_option('--binary-override-path',
- help='JavaScript engine binary. By default, d8 under '
- 'architecture-specific build dir. '
- 'Not supported in conjunction with outdir-secondary.')
- parser.add_option('--prioritize',
- help='Raise the priority to nice -20 for the benchmarking '
- 'process.Requires Linux, schedtool, and sudo privileges.',
- default=False, action='store_true')
- parser.add_option('--affinitize',
- help='Run benchmarking process on the specified core. '
- 'For example: '
- '--affinitize=0 will run the benchmark process on core 0. '
- '--affinitize=3 will run the benchmark process on core 3. '
- 'Requires Linux, schedtool, and sudo privileges.',
- default=None)
- parser.add_option('--noaslr',
- help='Disable ASLR for the duration of the benchmarked '
- 'process. Requires Linux and sudo privileges.',
- default=False, action='store_true')
- parser.add_option('--cpu-governor',
- help='Set cpu governor to specified policy for the '
- 'duration of the benchmarked process. Typical options: '
- '"powersave" for more stable results, or "performance" '
- 'for shorter completion time of suite, with potentially '
- 'more noise in results.')
- parser.add_option('--filter',
- help='Only run the benchmarks beginning with this string. '
- 'For example: '
- '--filter=JSTests/TypedArrays/ will run only TypedArray '
- 'benchmarks from the JSTests suite.',
- default='')
- parser.add_option('--run-count-multiplier', default=1, type='int',
- help='Multipled used to increase number of times each test '
- 'is retried.')
- parser.add_option('--dump-logcats-to',
- help='Writes logcat output from each test into specified '
- 'directory. Only supported for android targets.')
-
- (options, args) = parser.parse_args(args)
- logging.basicConfig(
- level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
+class MaxTotalDurationReachedError(Exception):
+ """Exception used to stop running tests when max total duration is reached."""
+ pass
- if len(args) == 0: # pragma: no cover
- parser.print_help()
- return INFRA_FAILURE_RETCODE
- if options.arch in ['auto', 'native']: # pragma: no cover
- options.arch = ARCH_GUESS
+def Main(argv):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--arch',
+ help='The architecture to run tests for. Pass "auto" '
+ 'to auto-detect.', default='x64',
+ choices=SUPPORTED_ARCHS + ['auto'])
+ parser.add_argument('--buildbot',
+ help='Adapt to path structure used on buildbots and adds '
+ 'timestamps/level to all logged status messages',
+ default=False, action='store_true')
+ parser.add_argument('-d', '--device',
+ help='The device ID to run Android tests on. If not '
+ 'given it will be autodetected.')
+ parser.add_argument('--extra-flags',
+ help='Additional flags to pass to the test executable',
+ default='')
+ parser.add_argument('--json-test-results',
+ help='Path to a file for storing json results.')
+ parser.add_argument('--json-test-results-secondary',
+ help='Path to a file for storing json results from run '
+ 'without patch or for reference build run.')
+ parser.add_argument('--outdir', help='Base directory with compile output',
+ default='out')
+ parser.add_argument('--outdir-secondary',
+ help='Base directory with compile output without patch '
+ 'or for reference build')
+ parser.add_argument('--binary-override-path',
+ help='JavaScript engine binary. By default, d8 under '
+ 'architecture-specific build dir. '
+ 'Not supported in conjunction with outdir-secondary.')
+ parser.add_argument('--prioritize',
+ help='Raise the priority to nice -20 for the '
+                      'benchmarking process. Requires Linux, schedtool, and '
+ 'sudo privileges.', default=False, action='store_true')
+ parser.add_argument('--affinitize',
+ help='Run benchmarking process on the specified core. '
+ 'For example: --affinitize=0 will run the benchmark '
+ 'process on core 0. --affinitize=3 will run the '
+ 'benchmark process on core 3. Requires Linux, schedtool, '
+ 'and sudo privileges.', default=None)
+ parser.add_argument('--noaslr',
+ help='Disable ASLR for the duration of the benchmarked '
+ 'process. Requires Linux and sudo privileges.',
+ default=False, action='store_true')
+ parser.add_argument('--cpu-governor',
+ help='Set cpu governor to specified policy for the '
+ 'duration of the benchmarked process. Typical options: '
+ '"powersave" for more stable results, or "performance" '
+ 'for shorter completion time of suite, with potentially '
+ 'more noise in results.')
+ parser.add_argument('--filter',
+ help='Only run the benchmarks beginning with this '
+ 'string. For example: '
+ '--filter=JSTests/TypedArrays/ will run only TypedArray '
+ 'benchmarks from the JSTests suite.',
+ default='')
+ parser.add_argument('--confidence-level', type=int,
+ help='Repeatedly runs each benchmark until specified '
+ 'confidence level is reached. The value is interpreted '
+ 'as the number of standard deviations from the mean that '
+ 'all values must lie within. Typical values are 1, 2 and '
+ '3 and correspond to 68%, 95% and 99.7% probability that '
+ 'the measured value is within 0.1% of the true value. '
+ 'Larger values result in more retries and thus longer '
+ 'runtime, but also provide more reliable results. Also '
+ 'see --max-total-duration flag.')
+ parser.add_argument('--max-total-duration', type=int, default=7140, # 1h 59m
+ help='Max total duration in seconds allowed for retries '
+ 'across all tests. This is especially useful in '
+ 'combination with the --confidence-level flag.')
+ parser.add_argument('--dump-logcats-to',
+ help='Writes logcat output from each test into specified '
+ 'directory. Only supported for android targets.')
+ parser.add_argument('--run-count', type=int, default=0,
+ help='Override the run count specified by the test '
+ 'suite. The default 0 uses the suite\'s config.')
+ parser.add_argument('-v', '--verbose', default=False, action='store_true',
+ help='Be verbose and print debug output.')
+ parser.add_argument('suite', nargs='+', help='Path to the suite config file.')
- if not options.arch in SUPPORTED_ARCHS: # pragma: no cover
- logging.error('Unknown architecture %s', options.arch)
+ try:
+ args = parser.parse_args(argv)
+ except SystemExit:
return INFRA_FAILURE_RETCODE
- if (options.json_test_results_secondary and
- not options.outdir_secondary): # pragma: no cover
+ logging.basicConfig(
+ level=logging.DEBUG if args.verbose else logging.INFO,
+ format='%(asctime)s %(levelname)-8s %(message)s')
+
+ if args.arch == 'auto': # pragma: no cover
+ args.arch = utils.DefaultArch()
+ if args.arch not in SUPPORTED_ARCHS:
+ logging.error(
+ 'Auto-detected architecture "%s" is not supported.', args.arch)
+ return INFRA_FAILURE_RETCODE
+
+ if (args.json_test_results_secondary and
+ not args.outdir_secondary): # pragma: no cover
logging.error('For writing secondary json test results, a secondary outdir '
'patch must be specified.')
return INFRA_FAILURE_RETCODE
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
- if options.buildbot:
+ if args.buildbot:
build_config = 'Release'
else:
- build_config = '%s.release' % options.arch
+ build_config = '%s.release' % args.arch
- if options.binary_override_path == None:
- options.shell_dir = os.path.join(workspace, options.outdir, build_config)
+ if args.binary_override_path == None:
+ args.shell_dir = os.path.join(workspace, args.outdir, build_config)
default_binary_name = 'd8'
else:
- if not os.path.isfile(options.binary_override_path):
+ if not os.path.isfile(args.binary_override_path):
logging.error('binary-override-path must be a file name')
return INFRA_FAILURE_RETCODE
- if options.outdir_secondary:
+ if args.outdir_secondary:
logging.error('specify either binary-override-path or outdir-secondary')
return INFRA_FAILURE_RETCODE
- options.shell_dir = os.path.abspath(
- os.path.dirname(options.binary_override_path))
- default_binary_name = os.path.basename(options.binary_override_path)
+ args.shell_dir = os.path.abspath(
+ os.path.dirname(args.binary_override_path))
+ default_binary_name = os.path.basename(args.binary_override_path)
- if options.outdir_secondary:
- options.shell_dir_secondary = os.path.join(
- workspace, options.outdir_secondary, build_config)
+ if args.outdir_secondary:
+ args.shell_dir_secondary = os.path.join(
+ workspace, args.outdir_secondary, build_config)
else:
- options.shell_dir_secondary = None
+ args.shell_dir_secondary = None
- if options.json_test_results:
- options.json_test_results = os.path.abspath(options.json_test_results)
+ if args.json_test_results:
+ args.json_test_results = os.path.abspath(args.json_test_results)
- if options.json_test_results_secondary:
- options.json_test_results_secondary = os.path.abspath(
- options.json_test_results_secondary)
+ if args.json_test_results_secondary:
+ args.json_test_results_secondary = os.path.abspath(
+ args.json_test_results_secondary)
# Ensure all arguments have absolute path before we start changing current
# directory.
- args = map(os.path.abspath, args)
+ args.suite = map(os.path.abspath, args.suite)
prev_aslr = None
prev_cpu_gov = None
- platform = Platform.GetPlatform(options)
-
- results = Results()
- results_secondary = Results()
- # We use list here to allow modification in nested function below.
- have_failed_tests = [False]
- with CustomMachineConfiguration(governor = options.cpu_governor,
- disable_aslr = options.noaslr) as conf:
- for path in args:
+ platform = Platform.GetPlatform(args)
+
+ result_tracker = ResultTracker()
+ result_tracker_secondary = ResultTracker()
+ have_failed_tests = False
+ with CustomMachineConfiguration(governor = args.cpu_governor,
+ disable_aslr = args.noaslr) as conf:
+ for path in args.suite:
if not os.path.exists(path): # pragma: no cover
- results.errors.append('Configuration file %s does not exist.' % path)
+ result_tracker.AddError('Configuration file %s does not exist.' % path)
continue
with open(path) as f:
@@ -1099,59 +1037,78 @@ def Main(args):
# Build the graph/trace tree structure.
default_parent = DefaultSentinel(default_binary_name)
- root = BuildGraphConfigs(suite, options.arch, default_parent)
+ root = BuildGraphConfigs(suite, args.arch, default_parent)
# Callback to be called on each node on traversal.
def NodeCB(node):
platform.PreTests(node, path)
# Traverse graph/trace tree and iterate over all runnables.
- for runnable in FlattenRunnables(root, NodeCB):
- runnable_name = '/'.join(runnable.graphs)
- if (not runnable_name.startswith(options.filter) and
- runnable_name + '/' != options.filter):
- continue
- logging.info('>>> Running suite: %s', runnable_name)
-
- def Runner():
- """Output generator that reruns several times."""
- total_runs = runnable.run_count * options.run_count_multiplier
- for i in range(0, max(1, total_runs)):
+ start = time.time()
+ try:
+ for runnable in FlattenRunnables(root, NodeCB):
+ runnable_name = '/'.join(runnable.graphs)
+ if (not runnable_name.startswith(args.filter) and
+ runnable_name + '/' != args.filter):
+ continue
+ logging.info('>>> Running suite: %s', runnable_name)
+
+ def RunGenerator(runnable):
+ if args.confidence_level:
+ counter = 0
+ while not result_tracker.HasEnoughRuns(
+ runnable, args.confidence_level):
+ yield counter
+ counter += 1
+ else:
+ for i in range(0, max(1, args.run_count or runnable.run_count)):
+ yield i
+
+ for i in RunGenerator(runnable):
attempts_left = runnable.retry_count + 1
while attempts_left:
- try:
- yield platform.Run(runnable, i)
- except TestFailedError:
- attempts_left -= 1
- if not attempts_left: # ignore failures until last attempt
- have_failed_tests[0] = True
- else:
- logging.info('>>> Retrying suite: %s', runnable_name)
- else:
+ total_duration = time.time() - start
+ if total_duration > args.max_total_duration:
+ logging.info(
+ '>>> Stopping now since running for too long (%ds > %ds)',
+ total_duration, args.max_total_duration)
+ raise MaxTotalDurationReachedError()
+
+ output, output_secondary = platform.Run(
+ runnable, i, secondary=args.shell_dir_secondary)
+ result_tracker.AddRunnableDuration(runnable, output.duration)
+ result_tracker_secondary.AddRunnableDuration(
+ runnable, output_secondary.duration)
+
+ if output.IsSuccess() and output_secondary.IsSuccess():
+ runnable.ProcessOutput(output, result_tracker, i)
+ if output_secondary is not NULL_OUTPUT:
+ runnable.ProcessOutput(
+ output_secondary, result_tracker_secondary, i)
break
- # Let runnable iterate over all runs and handle output.
- result, result_secondary = runnable.Run(
- Runner, trybot=options.shell_dir_secondary)
- results += result
- results_secondary += result_secondary
- if runnable.has_timeouts:
- results.timeouts.append(runnable_name)
- if runnable.has_near_timeouts:
- results.near_timeouts.append(runnable_name)
+            attempts_left -= 1
+            if not attempts_left:
+              have_failed_tests = True
+            else:
+              logging.info('>>> Retrying suite: %s', runnable_name)
+ except MaxTotalDurationReachedError:
+ have_failed_tests = True
+
platform.PostExecution()
- if options.json_test_results:
- results.WriteToFile(options.json_test_results)
+ if args.json_test_results:
+ result_tracker.WriteToFile(args.json_test_results)
else: # pragma: no cover
- print(results)
+ print('Primary results:', result_tracker)
- if options.json_test_results_secondary:
- results_secondary.WriteToFile(options.json_test_results_secondary)
- else: # pragma: no cover
- print(results_secondary)
+ if args.shell_dir_secondary:
+ if args.json_test_results_secondary:
+ result_tracker_secondary.WriteToFile(args.json_test_results_secondary)
+ else: # pragma: no cover
+ print('Secondary results:', result_tracker_secondary)
- if results.errors or have_failed_tests[0]:
+ if (result_tracker.errors or result_tracker_secondary.errors or
+ have_failed_tests):
return 1
return 0
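With the argparse migration the suite configs become positional arguments,
--confidence-level switches the runner into repeat-until-confident mode, and
--max-total-duration caps the total retry budget. A typical invocation might look
like the following (the suite path is purely illustrative):

    tools/run_perf.py --arch x64 --confidence-level 2 --max-total-duration 3600 path/to/suite.json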
diff --git a/deps/v8/tools/shell-utils.h b/deps/v8/tools/shell-utils.h
index bfd729d9b5..b41d3277aa 100644
--- a/deps/v8/tools/shell-utils.h
+++ b/deps/v8/tools/shell-utils.h
@@ -27,7 +27,7 @@
// Utility functions used by parser-shell.
-#include "src/globals.h"
+#include "src/common/globals.h"
#include <stdio.h>
diff --git a/deps/v8/tools/testrunner/OWNERS b/deps/v8/tools/testrunner/OWNERS
index c8693c972c..50b5741785 100644
--- a/deps/v8/tools/testrunner/OWNERS
+++ b/deps/v8/tools/testrunner/OWNERS
@@ -1,5 +1,3 @@
set noparent
-machenbach@chromium.org
-sergiyb@chromium.org
-tmrts@chromium.org \ No newline at end of file
+file://INFRA_OWNERS
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 5e6a3c11a5..caed59356e 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -185,11 +185,13 @@ class BuildConfig(object):
self.is_android = build_config['is_android']
self.is_clang = build_config['is_clang']
self.is_debug = build_config['is_debug']
+ self.is_full_debug = build_config['is_full_debug']
self.msan = build_config['is_msan']
self.no_i18n = not build_config['v8_enable_i18n_support']
self.no_snap = not build_config['v8_use_snapshot']
self.predictable = build_config['v8_enable_verify_predictable']
self.tsan = build_config['is_tsan']
+ # TODO(machenbach): We only have ubsan not ubsan_vptr.
self.ubsan_vptr = build_config['is_ubsan_vptr']
self.embedded_builtins = build_config['v8_enable_embedded_builtins']
self.verify_csa = build_config['v8_enable_verify_csa']
@@ -200,6 +202,11 @@ class BuildConfig(object):
self.mips_arch_variant = build_config['mips_arch_variant']
self.mips_use_msa = build_config['mips_use_msa']
+ @property
+ def use_sanitizer(self):
+ return (self.asan or self.cfi_vptr or self.msan or self.tsan or
+ self.ubsan_vptr)
+
def __str__(self):
detected_options = []
@@ -341,9 +348,6 @@ class BaseTestRunner(object):
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -673,6 +677,7 @@ class BaseTestRunner(object):
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
"is_clang": self.build_config.is_clang,
+ "is_full_debug": self.build_config.is_full_debug,
"mips_arch_variant": mips_arch_variant,
"mode": self.mode_options.status_mode
if not self.build_config.dcheck_always_on
@@ -712,15 +717,18 @@ class BaseTestRunner(object):
)
def _timeout_scalefactor(self, options):
+ """Increases timeout for slow build configurations."""
factor = self.mode_options.timeout_scalefactor
-
- # Simulators are slow, therefore allow a longer timeout.
if self.build_config.arch in SLOW_ARCHS:
+ factor *= 4
+ if self.build_config.lite_mode:
factor *= 2
-
- # Predictable mode is slower.
if self.build_config.predictable:
- factor *= 2
+ factor *= 4
+ if self.build_config.use_sanitizer:
+ factor *= 1.5
+ if self.build_config.is_full_debug:
+ factor *= 4
return factor
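The factors above multiply, so slow configurations compound: a simulator arch (x4)
that is also a full debug build (x4) ends up with 16 times the base timeout. A
standalone sketch using the multipliers from the snippet:

    def timeout_scalefactor(base, slow_arch=False, lite_mode=False,
                            predictable=False, use_sanitizer=False,
                            is_full_debug=False):
        factor = base
        if slow_arch:
            factor *= 4
        if lite_mode:
            factor *= 2
        if predictable:
            factor *= 4
        if use_sanitizer:
            factor *= 1.5
        if is_full_debug:
            factor *= 4
        return factor

    print(timeout_scalefactor(1, slow_arch=True, is_full_debug=True))  # 16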
@@ -779,9 +787,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
self.framework_name,
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index 5eb0d8b20a..b68252c139 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -129,8 +129,12 @@ class BaseCommand(object):
def _abort(self, process, abort_called):
abort_called[0] = True
try:
+ print('Attempting to kill process %s' % process.pid)
+ sys.stdout.flush()
self._kill_process(process)
- except OSError:
+ except OSError as e:
+ print(e)
+ sys.stdout.flush()
pass
def __str__(self):
@@ -207,9 +211,6 @@ class WindowsCommand(BaseCommand):
return subprocess.list2cmdline(self._to_args_list())
def _kill_process(self, process):
- if self.verbose:
- print('Attempting to kill process %d' % process.pid)
- sys.stdout.flush()
tk = subprocess.Popen(
'taskkill /T /F /PID %d' % process.pid,
stdout=subprocess.PIPE,
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index ed9b1b87f5..dc92db6099 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -23,7 +23,9 @@ ALL_VARIANT_FLAGS = {
"slow_path": [["--force-slow-path"]],
"stress": [["--stress-opt", "--always-opt", "--no-liftoff",
"--no-wasm-tier-up"]],
- "stress_background_compile": [["--stress-background-compile"]],
+ "stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
+ "--wasm-code-gc",
+ "--stress-wasm-code-gc"]],
"stress_incremental_marking": [["--stress-incremental-marking"]],
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index 74cec56a85..78aa63d4c9 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -34,7 +34,8 @@ from ..local import utils
class Output(object):
- def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
+ def __init__(self, exit_code=0, timed_out=False, stdout=None, stderr=None,
+ pid=None, duration=None):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
@@ -61,3 +62,16 @@ class Output(object):
def HasTimedOut(self):
return self.timed_out
+
+ def IsSuccess(self):
+ return not self.HasCrashed() and not self.HasTimedOut()
+
+
+class _NullOutput(Output):
+ """Useful to signal that the binary has not been run."""
+ def __init__(self):
+ super(_NullOutput, self).__init__()
+
+
+# Default instance of the _NullOutput class above.
+NULL_OUTPUT = _NullOutput()
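NULL_OUTPUT acts as a null object for the secondary run: it carries default Output
fields, so callers such as run_perf.py can treat primary and secondary results
uniformly without None checks. A simplified standalone sketch of the pattern (not
the testrunner's actual class):

    class Output(object):
        def __init__(self, exit_code=0, timed_out=False, stdout=None,
                     stderr=None, duration=None):
            self.exit_code = exit_code
            self.timed_out = timed_out
            self.stdout = stdout
            self.stderr = stderr
            self.duration = duration

        def IsSuccess(self):
            # A default-constructed (null) output counts as a success.
            return self.exit_code == 0 and not self.timed_out

    NULL_OUTPUT = Output()

    def run_pair(execute, secondary=False):
        # The caller always receives two Output objects; no None handling needed.
        return execute(), (execute() if secondary else NULL_OUTPUT)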
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 80c7c29ed1..6d4dcd1352 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -244,13 +244,16 @@ class TestCase(object):
timeout = self._test_config.timeout
if "--stress-opt" in params:
timeout *= 4
+ if "--jitless" in params:
+ timeout *= 2
+ if "--no-opt" in params:
+ timeout *= 2
if "--noenable-vfp3" in params:
timeout *= 2
if self._get_timeout_param() == TIMEOUT_LONG:
timeout *= 10
-
- # TODO(majeski): make it slow outcome dependent.
- timeout *= 2
+ if self.is_slow:
+ timeout *= 4
return timeout
def get_shell(self):
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 2a08d2d97e..bc79c015bd 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -34,7 +34,7 @@ VARIANTS = ['default']
MORE_VARIANTS = [
'jitless',
'stress',
- 'stress_background_compile',
+ 'stress_js_bg_compile_wasm_code_gc',
'stress_incremental_marking',
]
@@ -53,7 +53,8 @@ GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
'--concurrent-recompilation-queue-length=64',
'--concurrent-recompilation-delay=500',
'--concurrent-recompilation',
- '--stress-flush-bytecode']
+ '--stress-flush-bytecode',
+ '--wasm-code-gc', '--stress-wasm-code-gc']
RANDOM_GC_STRESS_FLAGS = ['--random-gc-interval=5000',
'--stress-compaction-random']
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 12d9503088..aad6740c1c 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -7,11 +7,17 @@ from __future__ import print_function
import json
import os
+import platform
+import subprocess
import sys
import time
from . import base
-from ..local import junit_output
+
+
+# Base dir of the build products for Release and Debug.
+OUT_DIR = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out'))
def print_failure_header(test):
@@ -121,11 +127,27 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
self._print('Done running %s %s: %s' % (
test, test.variant or 'default', outcome))
+ # TODO(machenbach): Remove this platform specific hack and implement a proper
+ # feedback channel from the workers, providing which tests are currently run.
+ def _print_processes_linux(self):
+ if platform.system() == 'Linux':
+ try:
+ cmd = 'ps -aux | grep "%s"' % OUT_DIR
+ output = subprocess.check_output(cmd, shell=True)
+ self._print('List of processes:')
+ for line in (output or '').splitlines():
+ # Show command with pid, but other process info cut off.
+ self._print('pid: %s cmd: %s' %
+ (line.split()[1], line[line.index(OUT_DIR):]))
+ except:
+ pass
+
def _on_heartbeat(self):
if time.time() - self._last_printed_time > 30:
# Print something every 30 seconds to not get killed by an output
# timeout.
self._print('Still working...')
+ self._print_processes_linux()
class DotsProgressIndicator(SimpleProgressIndicator):
@@ -259,45 +281,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/tick-processor.html b/deps/v8/tools/tick-processor.html
index bfebfc9e6a..32f8d6608e 100644
--- a/deps/v8/tools/tick-processor.html
+++ b/deps/v8/tools/tick-processor.html
@@ -89,6 +89,7 @@ function start_process() {
ignoreUnknown: false,
separateIc: true,
targetRootFS: '',
+ apkEmbeddedLibrary: '',
nm: 'nm'
};
@@ -100,7 +101,7 @@ function start_process() {
var tickProcessor = new TickProcessor(
new (entriesProviders[DEFAULTS.platform])(
- DEFAULTS.nm, DEFAULTS.targetRootFS),
+ DEFAULTS.nm, DEFAULTS.targetRootFS, DEFAULTS.apkEmbeddedLibrary),
DEFAULTS.separateIc, DEFAULTS.callGraphSize,
DEFAULTS.ignoreUnknown, DEFAULTS.stateFilter);
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
index 58844c127e..93331cfa2d 100644
--- a/deps/v8/tools/tickprocessor-driver.js
+++ b/deps/v8/tools/tickprocessor-driver.js
@@ -62,7 +62,8 @@ if (params.sourceMap) {
sourceMap = SourceMap.load(params.sourceMap);
}
var tickProcessor = new TickProcessor(
- new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
+ new (entriesProviders[params.platform])(params.nm, params.targetRootFS,
+ params.apkEmbeddedLibrary),
params.separateIc,
params.separateBytecodes,
params.separateBuiltins,
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index 31acd3d7be..ddb6d029f6 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -685,11 +685,12 @@ CppEntriesProvider.prototype.parseNextLine = function() {
};
-function UnixCppEntriesProvider(nmExec, targetRootFS) {
+function UnixCppEntriesProvider(nmExec, targetRootFS, apkEmbeddedLibrary) {
this.symbols = [];
this.parsePos = 0;
this.nmExec = nmExec;
this.targetRootFS = targetRootFS;
+ this.apkEmbeddedLibrary = apkEmbeddedLibrary;
this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
};
inherits(UnixCppEntriesProvider, CppEntriesProvider);
@@ -697,6 +698,9 @@ inherits(UnixCppEntriesProvider, CppEntriesProvider);
UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
this.parsePos = 0;
+ if (this.apkEmbeddedLibrary && libName.endsWith('.apk')) {
+ libName = this.apkEmbeddedLibrary;
+ }
libName = this.targetRootFS + libName;
try {
this.symbols = [
@@ -735,8 +739,8 @@ UnixCppEntriesProvider.prototype.parseNextLine = function() {
};
-function MacCppEntriesProvider(nmExec, targetRootFS) {
- UnixCppEntriesProvider.call(this, nmExec, targetRootFS);
+function MacCppEntriesProvider(nmExec, targetRootFS, apkEmbeddedLibrary) {
+ UnixCppEntriesProvider.call(this, nmExec, targetRootFS, apkEmbeddedLibrary);
// Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
this.FUNC_RE = /^([0-9a-fA-F]{8,16})() (.*)$/;
};
@@ -758,7 +762,8 @@ MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
};
-function WindowsCppEntriesProvider(_ignored_nmExec, targetRootFS) {
+function WindowsCppEntriesProvider(_ignored_nmExec, targetRootFS,
+ _ignored_apkEmbeddedLibrary) {
this.targetRootFS = targetRootFS;
this.symbols = '';
this.parsePos = 0;
@@ -882,6 +887,8 @@ class ArgumentsProcessor extends BaseArgumentsProcessor {
'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
'--target': ['targetRootFS', '',
'Specify the target root directory for cross environment'],
+ '--apk-embedded-library': ['apkEmbeddedLibrary', '',
+ 'Specify the path of the embedded library for Android traces'],
'--range': ['range', 'auto,auto',
'Specify the range limit as [start],[end]'],
'--distortion': ['distortion', 0,
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index 761f727e6f..51b588f90b 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# -*- coding: utf-8 -*-
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -14,6 +15,8 @@ import sys
import re
from subprocess import Popen, PIPE
+kPercentEscape = r'α'  # Unicode alpha
+
def preprocess(input):
input = re.sub(r'(if\s+)constexpr(\s*\()', r'\1/*COxp*/\2', input)
input = re.sub(r'(\s+)operator\s*(\'[^\']+\')', r'\1/*_OPE \2*/', input)
@@ -46,14 +49,17 @@ def preprocess(input):
r'\n otherwise', input)
input = re.sub(r'(\n\s*\S[^\n]*\s)otherwise',
r'\1_OtheSaLi', input)
+ input = re.sub(r'@if\(', r'@iF(', input)
+ input = re.sub(r'@export', r'@eXpOrT', input)
+
+  # Special handling of '%' for intrinsics: turn the percent
+  # into a Unicode character so that it is treated as part of the
+  # intrinsic's name when it is adjacent to it.
+ input = re.sub(r'%([A-Za-z])', kPercentEscape + r'\1', input)
+
return input
def postprocess(output):
- output = re.sub(r'%\s*RawDownCast', r'%RawDownCast', output)
- output = re.sub(r'%\s*RawConstexprCast', r'%RawConstexprCast', output)
- output = re.sub(r'%\s*FromConstexpr', r'%FromConstexpr', output)
- output = re.sub(r'%\s*Allocate', r'%Allocate', output)
- output = re.sub(r'%\s*GetAllocationBaseSize', r'%GetAllocationBaseSize', output)
output = re.sub(r'\/\*COxp\*\/', r'constexpr', output)
output = re.sub(r'(\S+)\s*: type([,>])', r'\1: type\2', output)
output = re.sub(r'(\n\s*)labels( [A-Z])', r'\1 labels\2', output)
@@ -79,6 +85,9 @@ def postprocess(output):
r"\n\1otherwise", output)
output = re.sub(r'_OtheSaLi',
r"otherwise", output)
+ output = re.sub(r'@iF\(', r'@if(', output)
+ output = re.sub(r'@eXpOrT',
+ r"@export", output)
while True:
old = output
@@ -87,6 +96,8 @@ def postprocess(output):
if old == output:
break;
+ output = re.sub(kPercentEscape, r'%', output)
+
return output
def process(filename, lint, should_format):
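
Aside: the hunks above implement a simple escape/unescape round trip so that the external formatter never splits an intrinsic name from its leading '%'. A minimal sketch of the idea, using the same regexes as the diff (the helper names here are illustrative and not part of format-torque.py):

# -*- coding: utf-8 -*-
import re

kPercentEscape = r'α'  # assumed never to occur in Torque source

def escape_intrinsics(src):
  # '%Foo' becomes 'αFoo', so the formatter sees one identifier.
  return re.sub(r'%([A-Za-z])', kPercentEscape + r'\1', src)

def unescape_intrinsics(src):
  # Restore the percent sign after formatting.
  return re.sub(kPercentEscape, r'%', src)

line = 'return %RawDownCast<A>(x);'
assert unescape_intrinsics(escape_intrinsics(line)) == line

The same preprocess/postprocess pattern is used for '@if(' and '@export' in the hunks above; the previous per-intrinsic substitutions in postprocess become unnecessary once the '%' never reaches the formatter.
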
diff --git a/deps/v8/tools/torque/make-torque-parser.py b/deps/v8/tools/torque/make-torque-parser.py
deleted file mode 100755
index 807b68bf36..0000000000
--- a/deps/v8/tools/torque/make-torque-parser.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This program either generates the parser files for Torque, generating
-the source and header files directly in V8's src directory."""
-
-import subprocess
-import sys
-import os
-import ntpath
-import re
-
-cwd = os.getcwd()
-tools = ntpath.dirname(sys.argv[0]);
-grammar = tools + '/../../src/torque/Torque.g4'
-basename = ntpath.basename(grammar)
-dirname = ntpath.dirname(grammar)
-os.chdir(dirname)
-cargs = ['java', '-Xmx500M', 'org.antlr.v4.Tool', '-visitor', basename]
-result = subprocess.call(cargs)
-os.chdir(cwd)
-
-def fix_file(filename):
- is_header = re.search(r'\.h', filename) is not None;
- header_macro = filename.upper();
- header_macro = re.sub('\.', '_', header_macro);
- header_macro = "V8_TORQUE_" + header_macro + '_';
-
- copyright = '// Copyright 2018 the V8 project authors. All rights reserved.\n'
- copyright += '// Use of this source code is governed by a BSD-style license that can be\n'
- copyright += '// found in the LICENSE file.\n'
- file_path = tools + '/../../src/torque/' + filename;
- temp_file_path = file_path + '.tmp'
- output_file = open(temp_file_path, 'w')
- output_file.write(copyright);
- if is_header:
- output_file.write('#ifndef ' + header_macro + '\n');
- output_file.write('#define ' + header_macro + '\n');
-
- with open(file_path) as f:
- content = f.readlines()
- for x in content:
- x = re.sub(';;', ';', x)
- x = re.sub('antlr4-runtime\.h', './antlr4-runtime.h', x)
- x = re.sub(' TorqueParser.antlr4', ' explicit TorqueParser(antlr4', x)
- x = re.sub(' TorqueLexer.antlr4', ' explicit TorqueLexer(antlr4', x)
- if not re.search('= 0', x):
- x = re.sub('virtual', '', x)
- output_file.write(x)
-
- if is_header:
- output_file.write('#endif // ' + header_macro + '\n');
- output_file.close();
-
- subprocess.call(['rm', file_path])
- subprocess.call(['mv', temp_file_path, file_path])
-
-fix_file('TorqueBaseListener.h');
-fix_file('TorqueBaseListener.cpp');
-fix_file('TorqueBaseVisitor.h');
-fix_file('TorqueBaseVisitor.cpp');
-fix_file('TorqueLexer.h');
-fix_file('TorqueLexer.cpp');
-fix_file('TorqueParser.h');
-fix_file('TorqueParser.cpp');
-fix_file('TorqueListener.h');
-fix_file('TorqueListener.cpp');
-fix_file('TorqueVisitor.h');
-fix_file('TorqueVisitor.cpp');
diff --git a/deps/v8/tools/torque/vim-torque/syntax/torque.vim b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
index c2e4ba0f7a..1a4ce987c7 100644
--- a/deps/v8/tools/torque/vim-torque/syntax/torque.vim
+++ b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
@@ -30,7 +30,7 @@ syn keyword torqueFunction macro builtin runtime intrinsic
syn keyword torqueKeyword cast convert from_constexpr min max unsafe_cast
syn keyword torqueLabel case
syn keyword torqueMatching try label catch
-syn keyword torqueModifier extern javascript constexpr transitioning transient weak
+syn keyword torqueModifier extern javascript constexpr transitioning transient weak export
syn match torqueNumber /\v<[0-9]+(\.[0-9]*)?>/
syn match torqueNumber /\v<0x[0-9a-fA-F]+>/
syn keyword torqueOperator operator
diff --git a/deps/v8/tools/torque/vscode-torque/package.json b/deps/v8/tools/torque/vscode-torque/package.json
index 42174a6c9f..16c8095f86 100644
--- a/deps/v8/tools/torque/vscode-torque/package.json
+++ b/deps/v8/tools/torque/vscode-torque/package.json
@@ -41,13 +41,13 @@
"torque.trace.server": {
"type": "string",
"enum": [
- "off",
- "messages",
- "verbose"
+ "off",
+ "messages",
+ "verbose"
],
"default": "off",
"description": "Trace the communication with the Torque language server from VSCode."
- }
+ }
}
},
"languages": [
diff --git a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
index cbbf381da8..dea5be517b 100644
--- a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
+++ b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
@@ -1,175 +1,177 @@
{
- "$schema": "https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json",
- "name": "Torque",
- "patterns": [
- {
- "name": "comment.line.double-slash.torque",
- "begin": "//",
- "end": "$"
- },
- {
- "name": "comment.block.torque",
- "begin": "/\\*",
- "end": "\\*/"
- },
- {
- "name": "support.function.torque",
- "match": "\\b(assert|check|debug|unreachable|Cast|Convert|FromConstexpr|UnsafeCast)\\b"
- },
- {
- "name": "constant.other.torque",
- "match": "\\b(true|True|false|False|Undefined|Hole|Null|k[A-Z][A-Za-z0-9]+)\\b"
- },
- {
- "begin": "\\b<(?=[A-Za-z][0-9A-Za-z_|, ]*>)",
- "end": ">",
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- },
- {
- "begin": "\\b(?=extern\\b)",
- "end": ";",
- "patterns": [
- {
- "begin": "\\)\\(|(?=(\\b[a-zA-Z0-9_]+)\\((?!\\s*implicit))",
- "end": "\\)",
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- },
- {
- "include": "#common"
- }
- ]
- },
- {
- "begin": "\\b(type)\\b",
- "end": ";",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- }
- },
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "\\b([A-Za-z][0-9A-Za-z_]*)\\b"
- }
- ]
- },
- {
- "name": "keyword.control.torque",
- "match": "#include"
- },
- {
- "include": "#common"
- }
- ],
- "repository": {
- "common": {
- "patterns": [
- {
- "match": "\\b(extends)\\s+([A-Za-z0-9]+)",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- },
- "2": {
- "name": "support.type.torque"
- }
- }
- },
- {
- "name": "keyword.control.torque",
- "match": "\\b(if|else|while|for|return|continue|break|goto|otherwise|try|label|catch)\\b"
- },
- {
- "name": "keyword.other.torque",
- "match": "\\b(constexpr|macro|builtin|runtime|intrinsic|javascript|implicit|deferred|label|labels|tail|let|generates|weak|extern|const|typeswitch|case|transient|transitioning|operator|namespace)\\b"
- },
- {
- "name": "keyword.operator.torque",
- "match": "\\b(=|\\*=)\\b"
- },
- {
- "match": "\\b(class)\\s+([A-Za-z0-9]+)",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- },
- "2": {
- "name": "support.type.torque"
- }
- }
- },
- {
- "match": "\\b(struct)\\s+([A-Za-z0-9]+)",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- },
- "2": {
- "name": "support.type.torque"
- }
- }
- },
- {
- "name": "string.quoted.double.torque",
- "begin": "\"",
- "end": "\"",
- "patterns": [
- {
- "name": "constant.character.escape.torque",
- "match": "\\\\."
- }
- ]
- },
- {
- "name": "string.quoted.single.torque",
- "begin": "'",
- "end": "'",
- "patterns": [
- {
- "name": "constant.character.escape.torque",
- "match": "\\\\."
- }
- ]
- },
- {
- "begin": ":(\\s*)?",
- "end": "(?=(generates|[^0-9A-Za-z_| ]))",
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- },
- {
- "name": "support.function.torque",
- "match": "\\b[A-Za-z0-9_]+\\b(?=(<[ ,:A-Za-z0-9_]+>)?\\()"
- }
- ]
- }
- },
- "scopeName": "source.torque"
-} \ No newline at end of file
+ "$schema": "https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json",
+ "name": "Torque",
+ "patterns": [
+ {
+ "name": "comment.line.double-slash.torque",
+ "begin": "//",
+ "end": "$"
+ },
+ {
+ "name": "comment.block.torque",
+ "begin": "/\\*",
+ "end": "\\*/"
+ },
+ {
+ "name": "support.function.torque",
+ "match": "\\b(assert|check|debug|unreachable|Cast|Convert|FromConstexpr|UnsafeCast)\\b"
+ },
+ {
+ "name": "constant.other.torque",
+ "match": "\\b(true|True|false|False|Undefined|Hole|Null|k[A-Z][A-Za-z0-9]+)\\b"
+ },
+ {
+ "begin": "\\b<(?=[A-Za-z][0-9A-Za-z_|, ]*>)",
+ "end": ">",
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "([A-Za-z][0-9A-Za-z_]*)"
+ }
+ ]
+ },
+ {
+ "begin": "\\b(?=(macro|runtime|builtin)\\b)",
+ "end": ";|\\{",
+ "patterns": [
+ {
+ "begin": "\\(",
+ "end": "\\)",
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "match": "(([A-Za-z][0-9A-Za-z_]*):\\s*)?([A-Za-z][0-9A-Za-z_]*)",
+ "captures":{
+ "3": {"name": "support.type.torque"}
+ }
+ }
+ ]
+ },
+ {
+ "include": "#common"
+ }
+ ]
+ },
+ {
+ "begin": "\\b(type)\\b",
+ "end": ";",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ }
+ },
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "\\b([A-Za-z][0-9A-Za-z_]*)\\b"
+ }
+ ]
+ },
+ {
+ "name": "keyword.control.torque",
+ "match": "#include"
+ },
+ {
+ "include": "#common"
+ }
+ ],
+ "repository": {
+ "common": {
+ "patterns": [
+ {
+ "match": "\\b(extends)\\s+([A-Za-z0-9]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ },
+ "2": {
+ "name": "support.type.torque"
+ }
+ }
+ },
+ {
+ "name": "keyword.control.torque",
+ "match": "\\b(if|else|while|for|return|continue|break|goto|otherwise|try|label|catch)\\b"
+ },
+ {
+ "name": "keyword.other.torque",
+ "match": "\\b(constexpr|macro|builtin|runtime|intrinsic|javascript|implicit|deferred|label|labels|tail|let|generates|weak|extern|const|typeswitch|case|transient|transitioning|operator|namespace|export)\\b"
+ },
+ {
+ "name": "keyword.operator.torque",
+ "match": "\\b(=|\\*=)\\b"
+ },
+ {
+ "match": "\\b(class|new)\\s+([A-Za-z0-9]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ },
+ "2": {
+ "name": "support.type.torque"
+ }
+ }
+ },
+ {
+ "match": "\\b(struct)\\s+([A-Za-z0-9]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ },
+ "2": {
+ "name": "support.type.torque"
+ }
+ }
+ },
+ {
+ "name": "string.quoted.double.torque",
+ "begin": "\"",
+ "end": "\"",
+ "patterns": [
+ {
+ "name": "constant.character.escape.torque",
+ "match": "\\\\."
+ }
+ ]
+ },
+ {
+ "name": "string.quoted.single.torque",
+ "begin": "'",
+ "end": "'",
+ "patterns": [
+ {
+ "name": "constant.character.escape.torque",
+ "match": "\\\\."
+ }
+ ]
+ },
+ {
+ "begin": ":(\\s*)?",
+ "end": "(?=(generates|[^0-9A-Za-z_| ]))",
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "([A-Za-z][0-9A-Za-z_]*)"
+ }
+ ]
+ },
+ {
+ "name": "support.function.torque",
+ "match": "\\b[A-Za-z0-9_]+\\b(?=(<[ ,:A-Za-z0-9_]+>)?\\()"
+ }
+ ]
+ }
+ },
+ "scopeName": "source.torque"
+}
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index 5e009ebd6b..083d224b2d 100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -7,9 +7,7 @@
from __future__ import print_function
from collections import namedtuple
-import coverage
import json
-import mock
import os
import platform
import shutil
@@ -18,6 +16,9 @@ import sys
import tempfile
import unittest
+import coverage
+import mock
+
# Requires python-coverage and python-mock. Native python coverage
# version >= 3.7.1 should be installed to get the best speed.
@@ -31,6 +32,7 @@ V8_JSON = {
'path': ['.'],
'owners': ['username@chromium.org'],
'binary': 'd7',
+ 'timeout': 60,
'flags': ['--flag'],
'main': 'run.js',
'run_count': 1,
@@ -88,8 +90,6 @@ V8_GENERIC_JSON = {
'units': 'ms',
}
-Output = namedtuple('Output', 'stdout, stderr, timed_out, exit_code')
-
class PerfTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
@@ -99,8 +99,8 @@ class PerfTest(unittest.TestCase):
cls._cov.start()
import run_perf
from testrunner.local import command
- global command
- global run_perf
+ from testrunner.objects.output import Output, NULL_OUTPUT
+ global command, run_perf, Output, NULL_OUTPUT
@classmethod
def tearDownClass(cls):
@@ -127,9 +127,9 @@ class PerfTest(unittest.TestCase):
def _MockCommand(self, *args, **kwargs):
# Fake output for each test run.
test_outputs = [Output(stdout=arg,
- stderr=None,
timed_out=kwargs.get('timed_out', False),
- exit_code=kwargs.get('exit_code', 0))
+ exit_code=kwargs.get('exit_code', 0),
+ duration=42)
for arg in args[1]]
def create_cmd(*args, **kwargs):
cmd = mock.MagicMock()
@@ -145,7 +145,7 @@ class PerfTest(unittest.TestCase):
# Check that d8 is called from the correct cwd for each test run.
dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
def chdir(*args, **kwargs):
- self.assertEquals(dirs.pop(), args[0])
+ self.assertEqual(dirs.pop(), args[0])
os.chdir = mock.MagicMock(side_effect=chdir)
subprocess.check_call = mock.MagicMock()
@@ -166,15 +166,24 @@ class PerfTest(unittest.TestCase):
return json.load(f)
def _VerifyResults(self, suite, units, traces, file_name=None):
- self.assertEquals([
+ self.assertListEqual(sorted([
{'units': units,
'graphs': [suite, trace['name']],
'results': trace['results'],
- 'stddev': trace['stddev']} for trace in traces],
- self._LoadResults(file_name)['traces'])
+ 'stddev': trace['stddev']} for trace in traces]),
+ sorted(self._LoadResults(file_name)['traces']))
+
+ def _VerifyRunnableDurations(self, runs, timeout, file_name=None):
+ self.assertListEqual([
+ {
+ 'graphs': ['test'],
+ 'durations': [42] * runs,
+ 'timeout': timeout,
+ },
+ ], self._LoadResults(file_name)['runnables'])
def _VerifyErrors(self, errors):
- self.assertEquals(errors, self._LoadResults()['errors'])
+ self.assertListEqual(errors, self._LoadResults()['errors'])
def _VerifyMock(self, binary, *args, **kwargs):
shell = os.path.join(os.path.dirname(BASE_DIR), binary)
@@ -185,7 +194,7 @@ class PerfTest(unittest.TestCase):
timeout=kwargs.get('timeout', 60))
def _VerifyMockMultiple(self, *args, **kwargs):
- self.assertEquals(len(args), len(command.Command.call_args_list))
+ self.assertEqual(len(args), len(command.Command.call_args_list))
for arg, actual in zip(args, command.Command.call_args_list):
expected = {
'cmd_prefix': [],
@@ -193,16 +202,17 @@ class PerfTest(unittest.TestCase):
'args': list(arg[1:]),
'timeout': kwargs.get('timeout', 60)
}
- self.assertEquals((expected, ), actual)
+ self.assertTupleEqual((expected, ), actual)
def testOneRun(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
+ self._VerifyRunnableDurations(1, 60)
self._VerifyErrors([])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
@@ -212,10 +222,10 @@ class PerfTest(unittest.TestCase):
test_input['test_flags'] = ['2', 'test_name']
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join(
@@ -230,10 +240,10 @@ class PerfTest(unittest.TestCase):
self._MockCommand(['.', '.'],
['Richards: 100\nDeltaBlue: 200\n',
'Richards: 50\nDeltaBlue: 300\n'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
self._VerifyResults('v8', 'ms', [
- {'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join(
@@ -249,10 +259,59 @@ class PerfTest(unittest.TestCase):
self._MockCommand(['.', '.'],
['Richards: 100\nDeltaBlue: 200\n',
'Richards: 50\nDeltaBlue: 300\n'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
+ ])
+ self._VerifyErrors([])
+ self._VerifyMock(os.path.join(
+ 'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+ def testPerfectConfidenceRuns(self):
+ self._WriteTestInput(V8_JSON)
+ self._MockCommand(
+ ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'] * 10)
+ self.assertEqual(0, self._CallMain('--confidence-level', '1'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234] * 10, 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0] * 10, 'stddev': ''},
+ ])
+ self._VerifyErrors([])
+ self._VerifyMock(os.path.join(
+ 'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+ def testNoisyConfidenceRuns(self):
+ self._WriteTestInput(V8_JSON)
+ self._MockCommand(
+ ['.'],
+ reversed([
+ # First 10 runs are mandatory. DeltaBlue is slightly noisy.
+ 'x\nRichards: 1.234\nDeltaBlue: 10757567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10557567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ # Need 4 more runs for confidence in DeltaBlue results.
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ ]),
+ )
+ self.assertEqual(0, self._CallMain('--confidence-level', '1'))
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': [1.234] * 14, 'stddev': ''},
+ {
+ 'name': 'DeltaBlue',
+ 'results': [10757567.0, 10557567.0] + [10657567.0] * 12,
+ 'stddev': '',
+ },
])
self._VerifyErrors([])
self._VerifyMock(os.path.join(
@@ -267,21 +326,21 @@ class PerfTest(unittest.TestCase):
'Simple: 3 ms.\n',
'Richards: 100\n',
'Richards: 50\n'])
- self.assertEquals(0, self._CallMain())
- self.assertEquals([
+ self.assertEqual(0, self._CallMain())
+ self.assertListEqual(sorted([
{'units': 'score',
'graphs': ['test', 'Richards'],
- 'results': ['50.0', '100.0'],
+ 'results': [50.0, 100.0],
'stddev': ''},
{'units': 'ms',
'graphs': ['test', 'Sub', 'Leaf'],
- 'results': ['3.0', '2.0', '1.0'],
+ 'results': [3.0, 2.0, 1.0],
'stddev': ''},
{'units': 'score',
'graphs': ['test', 'DeltaBlue'],
- 'results': ['200.0'],
+ 'results': [200.0],
'stddev': ''},
- ], self._LoadResults()['traces'])
+ ]), sorted(self._LoadResults()['traces']))
self._VerifyErrors([])
self._VerifyMockMultiple(
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
@@ -298,10 +357,10 @@ class PerfTest(unittest.TestCase):
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['Richards: 1.234\nRichards-stddev: 0.23\n'
'DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': '0.23'},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': '106'},
+ {'name': 'Richards', 'results': [1.234], 'stddev': '0.23'},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': '106'},
])
self._VerifyErrors([])
self._VerifyMock(
@@ -316,10 +375,10 @@ class PerfTest(unittest.TestCase):
'DeltaBlue: 6\nDeltaBlue-boom: 0.9\n',
'Richards: 2\nRichards-stddev: 0.5\n'
'DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n'])
- self.assertEquals(1, self._CallMain())
+ self.assertEqual(1, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['2.0', '3.0'], 'stddev': '0.7'},
- {'name': 'DeltaBlue', 'results': ['5.0', '6.0'], 'stddev': '0.8'},
+ {'name': 'Richards', 'results': [2.0, 3.0], 'stddev': '0.7'},
+ {'name': 'DeltaBlue', 'results': [5.0, 6.0], 'stddev': '0.8'},
])
self._VerifyErrors(
['Test test/Richards should only run once since a stddev is provided '
@@ -337,10 +396,10 @@ class PerfTest(unittest.TestCase):
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
- self.assertEquals(0, self._CallMain('--buildbot'))
+ self.assertEqual(0, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
@@ -353,11 +412,11 @@ class PerfTest(unittest.TestCase):
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
- self.assertEquals(0, self._CallMain('--buildbot'))
+ self.assertEqual(0, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
- {'name': 'Total', 'results': ['3626.49109719'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+ {'name': 'Total', 'results': [3626.491097190233], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
@@ -370,69 +429,38 @@ class PerfTest(unittest.TestCase):
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
- self.assertEquals(1, self._CallMain('--buildbot'))
+ self.assertEqual(1, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': [], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors(
['Regexp "^Richards: (.+)$" '
'returned a non-numeric for test test/Richards.',
- 'Not all traces have the same number of results.'])
+ 'Not all traces have produced results. Can not compute total for '
+ 'test.'])
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
def testRegexpNoMatch(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(['.'], ['x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n'])
- self.assertEquals(1, self._CallMain())
+ self.assertEqual(1, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': [], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors(
['Regexp "^Richards: (.+)$" did not match for test test/Richards.'])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
- def testOneRunGeneric(self):
- test_input = dict(V8_GENERIC_JSON)
- self._WriteTestInput(test_input)
- self._MockCommand(['.'], [
- 'RESULT Infra: Constant1= 11 count\n'
- 'RESULT Infra: Constant2= [10,5,10,15] count\n'
- 'RESULT Infra: Constant3= {12,1.2} count\n'
- 'RESULT Infra: Constant4= [10,5,error,15] count\n'])
- self.assertEquals(1, self._CallMain())
- self.assertEquals([
- {'units': 'count',
- 'graphs': ['test', 'Infra', 'Constant1'],
- 'results': ['11.0'],
- 'stddev': ''},
- {'units': 'count',
- 'graphs': ['test', 'Infra', 'Constant2'],
- 'results': ['10.0', '5.0', '10.0', '15.0'],
- 'stddev': ''},
- {'units': 'count',
- 'graphs': ['test', 'Infra', 'Constant3'],
- 'results': ['12.0'],
- 'stddev': '1.2'},
- {'units': 'count',
- 'graphs': ['test', 'Infra', 'Constant4'],
- 'results': [],
- 'stddev': ''},
- ], self._LoadResults()['traces'])
- self._VerifyErrors(['Found non-numeric in test/Infra/Constant4'])
- self._VerifyMock(os.path.join('out', 'x64.release', 'cc'), '--flag', '')
-
def testOneRunCrashed(self):
- self._WriteTestInput(V8_JSON)
+ test_input = dict(V8_JSON)
+ test_input['retry_count'] = 1
+ self._WriteTestInput(test_input)
self._MockCommand(
- ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'], exit_code=1)
- self.assertEquals(1, self._CallMain())
- self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': [], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': [], 'stddev': ''},
- ])
+ ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n', ''],
+ exit_code=-1)
+ self.assertEqual(1, self._CallMain())
+ self._VerifyResults('test', 'score', [])
self._VerifyErrors([])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
@@ -440,13 +468,11 @@ class PerfTest(unittest.TestCase):
def testOneRunTimingOut(self):
test_input = dict(V8_JSON)
test_input['timeout'] = 70
+ test_input['retry_count'] = 0
self._WriteTestInput(test_input)
self._MockCommand(['.'], [''], timed_out=True)
- self.assertEquals(1, self._CallMain())
- self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': [], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': [], 'stddev': ''},
- ])
+ self.assertEqual(1, self._CallMain())
+ self._VerifyResults('test', 'score', [])
self._VerifyErrors([])
self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
'--flag', 'run.js', timeout=70)
@@ -458,16 +484,16 @@ class PerfTest(unittest.TestCase):
mock.patch('run_perf.AndroidPlatform.PreTests').start()
mock.patch(
'run_perf.AndroidPlatform.Run',
- return_value=(
- 'Richards: 1.234\nDeltaBlue: 10657567\n', None)).start()
+ return_value=(Output(stdout='Richards: 1.234\nDeltaBlue: 10657567\n'),
+ NULL_OUTPUT)).start()
mock.patch('testrunner.local.android._Driver', autospec=True).start()
mock.patch(
'run_perf.Platform.ReadBuildConfig',
return_value={'is_android': True}).start()
- self.assertEquals(0, self._CallMain('--arch', 'arm'))
+ self.assertEqual(0, self._CallMain('--arch', 'arm'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
def testTwoRuns_Trybot(self):
@@ -481,18 +507,19 @@ class PerfTest(unittest.TestCase):
'Richards: 100\nDeltaBlue: 20\n'])
test_output_secondary = os.path.join(
TEST_WORKSPACE, 'results_secondary.json')
- self.assertEquals(0, self._CallMain(
+ self.assertEqual(0, self._CallMain(
'--outdir-secondary', 'out-secondary',
'--json-test-results-secondary', test_output_secondary,
))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['100.0', '200.0'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['20.0', '20.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [100.0, 200.0], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [20.0, 20.0], 'stddev': ''},
])
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['200.0', '200.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [200.0, 200.0], 'stddev': ''},
], test_output_secondary)
+ self._VerifyRunnableDurations(2, 60, test_output_secondary)
self._VerifyErrors([])
self._VerifyMockMultiple(
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
@@ -507,23 +534,15 @@ class PerfTest(unittest.TestCase):
test_input = dict(V8_JSON)
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
- self.assertEquals(0, self._CallMain('--extra-flags=--prof'))
+ self.assertEqual(0, self._CallMain('--extra-flags=--prof'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
'--flag', '--prof', 'run.js')
- def testUnzip(self):
- def Gen():
- for i in [1, 2, 3]:
- yield i, i + 1
- l, r = run_perf.Unzip(Gen())
- self.assertEquals([1, 2, 3], list(l()))
- self.assertEquals([2, 3, 4], list(r()))
-
#############################################################################
### System tests
@@ -540,54 +559,54 @@ class PerfTest(unittest.TestCase):
def testNormal(self):
results = self._RunPerf('d8_mocked1.py', 'test1.json')
- self.assertEquals([], results['errors'])
- self.assertEquals([
+ self.assertListEqual([], results['errors'])
+ self.assertListEqual(sorted([
{
'units': 'score',
'graphs': ['test1', 'Richards'],
- 'results': [u'1.2', u'1.2'],
+ 'results': [1.2, 1.2],
'stddev': '',
},
{
'units': 'score',
'graphs': ['test1', 'DeltaBlue'],
- 'results': [u'2.1', u'2.1'],
+ 'results': [2.1, 2.1],
'stddev': '',
},
- ], results['traces'])
+ ]), sorted(results['traces']))
def testResultsProcessor(self):
results = self._RunPerf('d8_mocked2.py', 'test2.json')
- self.assertEquals([], results['errors'])
- self.assertEquals([
+ self.assertListEqual([], results['errors'])
+ self.assertListEqual([
{
'units': 'score',
'graphs': ['test2', 'Richards'],
- 'results': [u'1.2', u'1.2'],
+ 'results': [1.2, 1.2],
'stddev': '',
},
{
'units': 'score',
'graphs': ['test2', 'DeltaBlue'],
- 'results': [u'2.1', u'2.1'],
+ 'results': [2.1, 2.1],
'stddev': '',
},
], results['traces'])
def testResultsProcessorNested(self):
results = self._RunPerf('d8_mocked2.py', 'test3.json')
- self.assertEquals([], results['errors'])
- self.assertEquals([
+ self.assertListEqual([], results['errors'])
+ self.assertListEqual([
{
'units': 'score',
'graphs': ['test3', 'Octane', 'Richards'],
- 'results': [u'1.2'],
+ 'results': [1.2],
'stddev': '',
},
{
'units': 'score',
'graphs': ['test3', 'Octane', 'DeltaBlue'],
- 'results': [u'2.1'],
+ 'results': [2.1],
'stddev': '',
},
], results['traces'])
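
The new --confidence-level tests above pin down the observable behaviour of run_perf's rerun logic: at least 10 runs are always performed, perfectly stable results stop right there, and a noisy trace (DeltaBlue) triggers extra runs (14 in total) until it settles. The sketch below is only a plausible reading of that behaviour, not the actual run_perf.py algorithm; the threshold and its scaling by the confidence level are assumptions:

import statistics

def needs_more_runs(samples, confidence_level, min_runs=10, max_runs=20):
  # Always do the mandatory runs; never exceed a hard cap.
  if len(samples) < min_runs:
    return True
  if len(samples) >= max_runs:
    return False
  mean = statistics.mean(samples)
  if mean == 0:
    return False
  # Keep rerunning while the relative standard error of the sample mean is
  # above a threshold scaled by the requested confidence level (assumed).
  stderr = statistics.stdev(samples) / (len(samples) ** 0.5)
  return stderr / abs(mean) > 0.01 / confidence_level

runs = [1.234] * 10
while needs_more_runs(runs, confidence_level=1):
  runs.append(1.234)
assert len(runs) == 10  # a perfectly stable benchmark stops after 10 runs
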
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index 39b7cdf87c..0192fd8ee3 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -7,6 +7,7 @@
"is_clang": true,
"is_component_build": false,
"is_debug": false,
+ "is_full_debug": false,
"is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index 73b7a0b7c8..f19c310bf8 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -7,6 +7,7 @@
"is_clang": true,
"is_component_build": false,
"is_debug": false,
+ "is_full_debug": false,
"is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index ff72b62e22..7237000695 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -476,7 +476,10 @@ class SourceProcessor(SourceFileProcessor):
'zlib.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
- IGNORE_COPYRIGHTS_DIRECTORY = "test/test262/local-tests"
+ IGNORE_COPYRIGHTS_DIRECTORIES = [
+ "test/test262/local-tests",
+ "test/mjsunit/wasm/bulk-memory-spec",
+ ]
def EndOfDeclaration(self, line):
return line == "}" or line == "};"
@@ -494,7 +497,8 @@ class SourceProcessor(SourceFileProcessor):
print("%s contains tabs" % name)
result = False
if not base in SourceProcessor.IGNORE_COPYRIGHTS and \
- not SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORY in name:
+ not any(ignore_dir in name for ignore_dir
+ in SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORIES):
if not COPYRIGHT_HEADER_PATTERN.search(contents):
print("%s is missing a correct copyright header." % name)
result = False
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index b891154e33..0165e0f1dd 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -36,103 +36,96 @@ INSTANCE_TYPES = {
72: "BYTE_ARRAY_TYPE",
73: "BYTECODE_ARRAY_TYPE",
74: "FREE_SPACE_TYPE",
- 75: "FIXED_INT8_ARRAY_TYPE",
- 76: "FIXED_UINT8_ARRAY_TYPE",
- 77: "FIXED_INT16_ARRAY_TYPE",
- 78: "FIXED_UINT16_ARRAY_TYPE",
- 79: "FIXED_INT32_ARRAY_TYPE",
- 80: "FIXED_UINT32_ARRAY_TYPE",
- 81: "FIXED_FLOAT32_ARRAY_TYPE",
- 82: "FIXED_FLOAT64_ARRAY_TYPE",
- 83: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 84: "FIXED_BIGINT64_ARRAY_TYPE",
- 85: "FIXED_BIGUINT64_ARRAY_TYPE",
- 86: "FIXED_DOUBLE_ARRAY_TYPE",
- 87: "FEEDBACK_METADATA_TYPE",
- 88: "FILLER_TYPE",
- 89: "ACCESS_CHECK_INFO_TYPE",
- 90: "ACCESSOR_INFO_TYPE",
- 91: "ACCESSOR_PAIR_TYPE",
- 92: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 93: "ALLOCATION_MEMENTO_TYPE",
- 94: "ASM_WASM_DATA_TYPE",
- 95: "ASYNC_GENERATOR_REQUEST_TYPE",
- 96: "CLASS_POSITIONS_TYPE",
- 97: "DEBUG_INFO_TYPE",
- 98: "ENUM_CACHE_TYPE",
- 99: "FUNCTION_TEMPLATE_INFO_TYPE",
- 100: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 101: "INTERCEPTOR_INFO_TYPE",
- 102: "INTERPRETER_DATA_TYPE",
- 103: "MODULE_INFO_ENTRY_TYPE",
- 104: "MODULE_TYPE",
- 105: "OBJECT_TEMPLATE_INFO_TYPE",
- 106: "PROMISE_CAPABILITY_TYPE",
- 107: "PROMISE_REACTION_TYPE",
- 108: "PROTOTYPE_INFO_TYPE",
- 109: "SCRIPT_TYPE",
- 110: "STACK_FRAME_INFO_TYPE",
- 111: "STACK_TRACE_FRAME_TYPE",
- 112: "TUPLE2_TYPE",
- 113: "TUPLE3_TYPE",
- 114: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
- 115: "WASM_DEBUG_INFO_TYPE",
- 116: "WASM_EXCEPTION_TAG_TYPE",
- 117: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 118: "CALLABLE_TASK_TYPE",
- 119: "CALLBACK_TASK_TYPE",
- 120: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
- 121: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
- 122: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
- 123: "FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE",
- 124: "ALLOCATION_SITE_TYPE",
- 125: "EMBEDDER_DATA_ARRAY_TYPE",
- 126: "FIXED_ARRAY_TYPE",
- 127: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 128: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 129: "HASH_TABLE_TYPE",
- 130: "ORDERED_HASH_MAP_TYPE",
- 131: "ORDERED_HASH_SET_TYPE",
- 132: "ORDERED_NAME_DICTIONARY_TYPE",
- 133: "NAME_DICTIONARY_TYPE",
- 134: "GLOBAL_DICTIONARY_TYPE",
- 135: "NUMBER_DICTIONARY_TYPE",
- 136: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 137: "STRING_TABLE_TYPE",
- 138: "EPHEMERON_HASH_TABLE_TYPE",
- 139: "SCOPE_INFO_TYPE",
- 140: "SCRIPT_CONTEXT_TABLE_TYPE",
- 141: "AWAIT_CONTEXT_TYPE",
- 142: "BLOCK_CONTEXT_TYPE",
- 143: "CATCH_CONTEXT_TYPE",
- 144: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 145: "EVAL_CONTEXT_TYPE",
- 146: "FUNCTION_CONTEXT_TYPE",
- 147: "MODULE_CONTEXT_TYPE",
- 148: "NATIVE_CONTEXT_TYPE",
- 149: "SCRIPT_CONTEXT_TYPE",
- 150: "WITH_CONTEXT_TYPE",
- 151: "WEAK_FIXED_ARRAY_TYPE",
- 152: "TRANSITION_ARRAY_TYPE",
- 153: "CALL_HANDLER_INFO_TYPE",
- 154: "CELL_TYPE",
- 155: "CODE_DATA_CONTAINER_TYPE",
- 156: "DESCRIPTOR_ARRAY_TYPE",
- 157: "FEEDBACK_CELL_TYPE",
- 158: "FEEDBACK_VECTOR_TYPE",
- 159: "LOAD_HANDLER_TYPE",
- 160: "PREPARSE_DATA_TYPE",
- 161: "PROPERTY_ARRAY_TYPE",
- 162: "PROPERTY_CELL_TYPE",
- 163: "SHARED_FUNCTION_INFO_TYPE",
- 164: "SMALL_ORDERED_HASH_MAP_TYPE",
- 165: "SMALL_ORDERED_HASH_SET_TYPE",
- 166: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 167: "STORE_HANDLER_TYPE",
- 168: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 169: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 170: "WEAK_ARRAY_LIST_TYPE",
- 171: "WEAK_CELL_TYPE",
+ 75: "FIXED_DOUBLE_ARRAY_TYPE",
+ 76: "FEEDBACK_METADATA_TYPE",
+ 77: "FILLER_TYPE",
+ 78: "ACCESS_CHECK_INFO_TYPE",
+ 79: "ACCESSOR_INFO_TYPE",
+ 80: "ACCESSOR_PAIR_TYPE",
+ 81: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 82: "ALLOCATION_MEMENTO_TYPE",
+ 83: "ASM_WASM_DATA_TYPE",
+ 84: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 85: "CLASS_POSITIONS_TYPE",
+ 86: "DEBUG_INFO_TYPE",
+ 87: "ENUM_CACHE_TYPE",
+ 88: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 89: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 90: "INTERCEPTOR_INFO_TYPE",
+ 91: "INTERPRETER_DATA_TYPE",
+ 92: "MODULE_INFO_ENTRY_TYPE",
+ 93: "MODULE_TYPE",
+ 94: "OBJECT_TEMPLATE_INFO_TYPE",
+ 95: "PROMISE_CAPABILITY_TYPE",
+ 96: "PROMISE_REACTION_TYPE",
+ 97: "PROTOTYPE_INFO_TYPE",
+ 98: "SCRIPT_TYPE",
+ 99: "SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE",
+ 100: "STACK_FRAME_INFO_TYPE",
+ 101: "STACK_TRACE_FRAME_TYPE",
+ 102: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 103: "TUPLE2_TYPE",
+ 104: "TUPLE3_TYPE",
+ 105: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+ 106: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 107: "WASM_DEBUG_INFO_TYPE",
+ 108: "WASM_EXCEPTION_TAG_TYPE",
+ 109: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 110: "WASM_JS_FUNCTION_DATA_TYPE",
+ 111: "CALLABLE_TASK_TYPE",
+ 112: "CALLBACK_TASK_TYPE",
+ 113: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+ 114: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+ 115: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+ 116: "FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE",
+ 117: "ALLOCATION_SITE_TYPE",
+ 118: "EMBEDDER_DATA_ARRAY_TYPE",
+ 119: "FIXED_ARRAY_TYPE",
+ 120: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 121: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 122: "HASH_TABLE_TYPE",
+ 123: "ORDERED_HASH_MAP_TYPE",
+ 124: "ORDERED_HASH_SET_TYPE",
+ 125: "ORDERED_NAME_DICTIONARY_TYPE",
+ 126: "NAME_DICTIONARY_TYPE",
+ 127: "GLOBAL_DICTIONARY_TYPE",
+ 128: "NUMBER_DICTIONARY_TYPE",
+ 129: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 130: "STRING_TABLE_TYPE",
+ 131: "EPHEMERON_HASH_TABLE_TYPE",
+ 132: "SCOPE_INFO_TYPE",
+ 133: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 134: "AWAIT_CONTEXT_TYPE",
+ 135: "BLOCK_CONTEXT_TYPE",
+ 136: "CATCH_CONTEXT_TYPE",
+ 137: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 138: "EVAL_CONTEXT_TYPE",
+ 139: "FUNCTION_CONTEXT_TYPE",
+ 140: "MODULE_CONTEXT_TYPE",
+ 141: "NATIVE_CONTEXT_TYPE",
+ 142: "SCRIPT_CONTEXT_TYPE",
+ 143: "WITH_CONTEXT_TYPE",
+ 144: "WEAK_FIXED_ARRAY_TYPE",
+ 145: "TRANSITION_ARRAY_TYPE",
+ 146: "CALL_HANDLER_INFO_TYPE",
+ 147: "CELL_TYPE",
+ 148: "CODE_DATA_CONTAINER_TYPE",
+ 149: "DESCRIPTOR_ARRAY_TYPE",
+ 150: "FEEDBACK_CELL_TYPE",
+ 151: "FEEDBACK_VECTOR_TYPE",
+ 152: "LOAD_HANDLER_TYPE",
+ 153: "PREPARSE_DATA_TYPE",
+ 154: "PROPERTY_ARRAY_TYPE",
+ 155: "PROPERTY_CELL_TYPE",
+ 156: "SHARED_FUNCTION_INFO_TYPE",
+ 157: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 158: "SMALL_ORDERED_HASH_SET_TYPE",
+ 159: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 160: "STORE_HANDLER_TYPE",
+ 161: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 162: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 163: "WEAK_ARRAY_LIST_TYPE",
+ 164: "WEAK_CELL_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
@@ -196,10 +189,10 @@ KNOWN_MAPS = {
("read_only_space", 0x00139): (74, "FreeSpaceMap"),
("read_only_space", 0x00189): (68, "MetaMap"),
("read_only_space", 0x00209): (67, "NullMap"),
- ("read_only_space", 0x00271): (156, "DescriptorArrayMap"),
- ("read_only_space", 0x002d1): (151, "WeakFixedArrayMap"),
- ("read_only_space", 0x00321): (88, "OnePointerFillerMap"),
- ("read_only_space", 0x00371): (88, "TwoPointerFillerMap"),
+ ("read_only_space", 0x00271): (149, "DescriptorArrayMap"),
+ ("read_only_space", 0x002d1): (144, "WeakFixedArrayMap"),
+ ("read_only_space", 0x00321): (77, "OnePointerFillerMap"),
+ ("read_only_space", 0x00371): (77, "TwoPointerFillerMap"),
("read_only_space", 0x003f1): (67, "UninitializedMap"),
("read_only_space", 0x00461): (8, "OneByteInternalizedStringMap"),
("read_only_space", 0x00501): (67, "UndefinedMap"),
@@ -207,71 +200,71 @@ KNOWN_MAPS = {
("read_only_space", 0x005e1): (67, "TheHoleMap"),
("read_only_space", 0x00689): (67, "BooleanMap"),
("read_only_space", 0x00761): (72, "ByteArrayMap"),
- ("read_only_space", 0x007b1): (126, "FixedArrayMap"),
- ("read_only_space", 0x00801): (126, "FixedCOWArrayMap"),
- ("read_only_space", 0x00851): (129, "HashTableMap"),
+ ("read_only_space", 0x007b1): (119, "FixedArrayMap"),
+ ("read_only_space", 0x00801): (119, "FixedCOWArrayMap"),
+ ("read_only_space", 0x00851): (122, "HashTableMap"),
("read_only_space", 0x008a1): (64, "SymbolMap"),
("read_only_space", 0x008f1): (40, "OneByteStringMap"),
- ("read_only_space", 0x00941): (139, "ScopeInfoMap"),
- ("read_only_space", 0x00991): (163, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x00941): (132, "ScopeInfoMap"),
+ ("read_only_space", 0x00991): (156, "SharedFunctionInfoMap"),
("read_only_space", 0x009e1): (69, "CodeMap"),
- ("read_only_space", 0x00a31): (146, "FunctionContextMap"),
- ("read_only_space", 0x00a81): (154, "CellMap"),
- ("read_only_space", 0x00ad1): (162, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x00a31): (139, "FunctionContextMap"),
+ ("read_only_space", 0x00a81): (147, "CellMap"),
+ ("read_only_space", 0x00ad1): (155, "GlobalPropertyCellMap"),
("read_only_space", 0x00b21): (71, "ForeignMap"),
- ("read_only_space", 0x00b71): (152, "TransitionArrayMap"),
- ("read_only_space", 0x00bc1): (158, "FeedbackVectorMap"),
+ ("read_only_space", 0x00b71): (145, "TransitionArrayMap"),
+ ("read_only_space", 0x00bc1): (151, "FeedbackVectorMap"),
("read_only_space", 0x00c61): (67, "ArgumentsMarkerMap"),
("read_only_space", 0x00d01): (67, "ExceptionMap"),
("read_only_space", 0x00da1): (67, "TerminationExceptionMap"),
("read_only_space", 0x00e49): (67, "OptimizedOutMap"),
("read_only_space", 0x00ee9): (67, "StaleRegisterMap"),
- ("read_only_space", 0x00f59): (148, "NativeContextMap"),
- ("read_only_space", 0x00fa9): (147, "ModuleContextMap"),
- ("read_only_space", 0x00ff9): (145, "EvalContextMap"),
- ("read_only_space", 0x01049): (149, "ScriptContextMap"),
- ("read_only_space", 0x01099): (141, "AwaitContextMap"),
- ("read_only_space", 0x010e9): (142, "BlockContextMap"),
- ("read_only_space", 0x01139): (143, "CatchContextMap"),
- ("read_only_space", 0x01189): (150, "WithContextMap"),
- ("read_only_space", 0x011d9): (144, "DebugEvaluateContextMap"),
- ("read_only_space", 0x01229): (140, "ScriptContextTableMap"),
- ("read_only_space", 0x01279): (128, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x012c9): (87, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x01319): (126, "ArrayListMap"),
+ ("read_only_space", 0x00f59): (141, "NativeContextMap"),
+ ("read_only_space", 0x00fa9): (140, "ModuleContextMap"),
+ ("read_only_space", 0x00ff9): (138, "EvalContextMap"),
+ ("read_only_space", 0x01049): (142, "ScriptContextMap"),
+ ("read_only_space", 0x01099): (134, "AwaitContextMap"),
+ ("read_only_space", 0x010e9): (135, "BlockContextMap"),
+ ("read_only_space", 0x01139): (136, "CatchContextMap"),
+ ("read_only_space", 0x01189): (143, "WithContextMap"),
+ ("read_only_space", 0x011d9): (137, "DebugEvaluateContextMap"),
+ ("read_only_space", 0x01229): (133, "ScriptContextTableMap"),
+ ("read_only_space", 0x01279): (121, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x012c9): (76, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x01319): (119, "ArrayListMap"),
("read_only_space", 0x01369): (66, "BigIntMap"),
- ("read_only_space", 0x013b9): (127, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x013b9): (120, "ObjectBoilerplateDescriptionMap"),
("read_only_space", 0x01409): (73, "BytecodeArrayMap"),
- ("read_only_space", 0x01459): (155, "CodeDataContainerMap"),
- ("read_only_space", 0x014a9): (86, "FixedDoubleArrayMap"),
- ("read_only_space", 0x014f9): (134, "GlobalDictionaryMap"),
- ("read_only_space", 0x01549): (157, "ManyClosuresCellMap"),
- ("read_only_space", 0x01599): (126, "ModuleInfoMap"),
+ ("read_only_space", 0x01459): (148, "CodeDataContainerMap"),
+ ("read_only_space", 0x014a9): (75, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x014f9): (127, "GlobalDictionaryMap"),
+ ("read_only_space", 0x01549): (150, "ManyClosuresCellMap"),
+ ("read_only_space", 0x01599): (119, "ModuleInfoMap"),
("read_only_space", 0x015e9): (70, "MutableHeapNumberMap"),
- ("read_only_space", 0x01639): (133, "NameDictionaryMap"),
- ("read_only_space", 0x01689): (157, "NoClosuresCellMap"),
- ("read_only_space", 0x016d9): (135, "NumberDictionaryMap"),
- ("read_only_space", 0x01729): (157, "OneClosureCellMap"),
- ("read_only_space", 0x01779): (130, "OrderedHashMapMap"),
- ("read_only_space", 0x017c9): (131, "OrderedHashSetMap"),
- ("read_only_space", 0x01819): (132, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x01869): (160, "PreparseDataMap"),
- ("read_only_space", 0x018b9): (161, "PropertyArrayMap"),
- ("read_only_space", 0x01909): (153, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x01959): (153, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x019a9): (153, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x019f9): (136, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x01a49): (126, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x01a99): (164, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x01ae9): (165, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x01b39): (166, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x01b89): (137, "StringTableMap"),
- ("read_only_space", 0x01bd9): (168, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x01c29): (169, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x01c79): (170, "WeakArrayListMap"),
- ("read_only_space", 0x01cc9): (138, "EphemeronHashTableMap"),
- ("read_only_space", 0x01d19): (125, "EmbedderDataArrayMap"),
- ("read_only_space", 0x01d69): (171, "WeakCellMap"),
+ ("read_only_space", 0x01639): (126, "NameDictionaryMap"),
+ ("read_only_space", 0x01689): (150, "NoClosuresCellMap"),
+ ("read_only_space", 0x016d9): (128, "NumberDictionaryMap"),
+ ("read_only_space", 0x01729): (150, "OneClosureCellMap"),
+ ("read_only_space", 0x01779): (123, "OrderedHashMapMap"),
+ ("read_only_space", 0x017c9): (124, "OrderedHashSetMap"),
+ ("read_only_space", 0x01819): (125, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x01869): (153, "PreparseDataMap"),
+ ("read_only_space", 0x018b9): (154, "PropertyArrayMap"),
+ ("read_only_space", 0x01909): (146, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x01959): (146, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x019a9): (146, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x019f9): (129, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x01a49): (119, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x01a99): (157, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x01ae9): (158, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x01b39): (159, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x01b89): (130, "StringTableMap"),
+ ("read_only_space", 0x01bd9): (161, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x01c29): (162, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x01c79): (163, "WeakArrayListMap"),
+ ("read_only_space", 0x01cc9): (131, "EphemeronHashTableMap"),
+ ("read_only_space", 0x01d19): (118, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x01d69): (164, "WeakCellMap"),
("read_only_space", 0x01db9): (58, "NativeSourceStringMap"),
("read_only_space", 0x01e09): (32, "StringMap"),
("read_only_space", 0x01e59): (41, "ConsOneByteStringMap"),
@@ -289,62 +282,55 @@ KNOWN_MAPS = {
("read_only_space", 0x02219): (18, "UncachedExternalInternalizedStringMap"),
("read_only_space", 0x02269): (26, "UncachedExternalOneByteInternalizedStringMap"),
("read_only_space", 0x022b9): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x02309): (76, "FixedUint8ArrayMap"),
- ("read_only_space", 0x02359): (75, "FixedInt8ArrayMap"),
- ("read_only_space", 0x023a9): (78, "FixedUint16ArrayMap"),
- ("read_only_space", 0x023f9): (77, "FixedInt16ArrayMap"),
- ("read_only_space", 0x02449): (80, "FixedUint32ArrayMap"),
- ("read_only_space", 0x02499): (79, "FixedInt32ArrayMap"),
- ("read_only_space", 0x024e9): (81, "FixedFloat32ArrayMap"),
- ("read_only_space", 0x02539): (82, "FixedFloat64ArrayMap"),
- ("read_only_space", 0x02589): (83, "FixedUint8ClampedArrayMap"),
- ("read_only_space", 0x025d9): (85, "FixedBigUint64ArrayMap"),
- ("read_only_space", 0x02629): (84, "FixedBigInt64ArrayMap"),
- ("read_only_space", 0x02679): (67, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x026e1): (98, "EnumCacheMap"),
- ("read_only_space", 0x02781): (114, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x02ad1): (101, "InterceptorInfoMap"),
- ("read_only_space", 0x050b9): (89, "AccessCheckInfoMap"),
- ("read_only_space", 0x05109): (90, "AccessorInfoMap"),
- ("read_only_space", 0x05159): (91, "AccessorPairMap"),
- ("read_only_space", 0x051a9): (92, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x051f9): (93, "AllocationMementoMap"),
- ("read_only_space", 0x05249): (94, "AsmWasmDataMap"),
- ("read_only_space", 0x05299): (95, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x052e9): (96, "ClassPositionsMap"),
- ("read_only_space", 0x05339): (97, "DebugInfoMap"),
- ("read_only_space", 0x05389): (99, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x053d9): (100, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x05429): (102, "InterpreterDataMap"),
- ("read_only_space", 0x05479): (103, "ModuleInfoEntryMap"),
- ("read_only_space", 0x054c9): (104, "ModuleMap"),
- ("read_only_space", 0x05519): (105, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x05569): (106, "PromiseCapabilityMap"),
- ("read_only_space", 0x055b9): (107, "PromiseReactionMap"),
- ("read_only_space", 0x05609): (108, "PrototypeInfoMap"),
- ("read_only_space", 0x05659): (109, "ScriptMap"),
- ("read_only_space", 0x056a9): (110, "StackFrameInfoMap"),
- ("read_only_space", 0x056f9): (111, "StackTraceFrameMap"),
- ("read_only_space", 0x05749): (112, "Tuple2Map"),
- ("read_only_space", 0x05799): (113, "Tuple3Map"),
- ("read_only_space", 0x057e9): (115, "WasmDebugInfoMap"),
- ("read_only_space", 0x05839): (116, "WasmExceptionTagMap"),
- ("read_only_space", 0x05889): (117, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x058d9): (118, "CallableTaskMap"),
- ("read_only_space", 0x05929): (119, "CallbackTaskMap"),
- ("read_only_space", 0x05979): (120, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x059c9): (121, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x05a19): (122, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x05a69): (123, "FinalizationGroupCleanupJobTaskMap"),
- ("read_only_space", 0x05ab9): (124, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05b09): (124, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05b59): (159, "LoadHandler1Map"),
- ("read_only_space", 0x05ba9): (159, "LoadHandler2Map"),
- ("read_only_space", 0x05bf9): (159, "LoadHandler3Map"),
- ("read_only_space", 0x05c49): (167, "StoreHandler0Map"),
- ("read_only_space", 0x05c99): (167, "StoreHandler1Map"),
- ("read_only_space", 0x05ce9): (167, "StoreHandler2Map"),
- ("read_only_space", 0x05d39): (167, "StoreHandler3Map"),
+ ("read_only_space", 0x02309): (67, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x02371): (87, "EnumCacheMap"),
+ ("read_only_space", 0x02411): (105, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02601): (90, "InterceptorInfoMap"),
+ ("read_only_space", 0x04d99): (78, "AccessCheckInfoMap"),
+ ("read_only_space", 0x04de9): (79, "AccessorInfoMap"),
+ ("read_only_space", 0x04e39): (80, "AccessorPairMap"),
+ ("read_only_space", 0x04e89): (81, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x04ed9): (82, "AllocationMementoMap"),
+ ("read_only_space", 0x04f29): (83, "AsmWasmDataMap"),
+ ("read_only_space", 0x04f79): (84, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x04fc9): (85, "ClassPositionsMap"),
+ ("read_only_space", 0x05019): (86, "DebugInfoMap"),
+ ("read_only_space", 0x05069): (88, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x050b9): (89, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x05109): (91, "InterpreterDataMap"),
+ ("read_only_space", 0x05159): (92, "ModuleInfoEntryMap"),
+ ("read_only_space", 0x051a9): (93, "ModuleMap"),
+ ("read_only_space", 0x051f9): (94, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x05249): (95, "PromiseCapabilityMap"),
+ ("read_only_space", 0x05299): (96, "PromiseReactionMap"),
+ ("read_only_space", 0x052e9): (97, "PrototypeInfoMap"),
+ ("read_only_space", 0x05339): (98, "ScriptMap"),
+ ("read_only_space", 0x05389): (99, "SourcePositionTableWithFrameCacheMap"),
+ ("read_only_space", 0x053d9): (100, "StackFrameInfoMap"),
+ ("read_only_space", 0x05429): (101, "StackTraceFrameMap"),
+ ("read_only_space", 0x05479): (102, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x054c9): (103, "Tuple2Map"),
+ ("read_only_space", 0x05519): (104, "Tuple3Map"),
+ ("read_only_space", 0x05569): (106, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x055b9): (107, "WasmDebugInfoMap"),
+ ("read_only_space", 0x05609): (108, "WasmExceptionTagMap"),
+ ("read_only_space", 0x05659): (109, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x056a9): (110, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x056f9): (111, "CallableTaskMap"),
+ ("read_only_space", 0x05749): (112, "CallbackTaskMap"),
+ ("read_only_space", 0x05799): (113, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x057e9): (114, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x05839): (115, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x05889): (116, "FinalizationGroupCleanupJobTaskMap"),
+ ("read_only_space", 0x058d9): (117, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05929): (117, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05979): (152, "LoadHandler1Map"),
+ ("read_only_space", 0x059c9): (152, "LoadHandler2Map"),
+ ("read_only_space", 0x05a19): (152, "LoadHandler3Map"),
+ ("read_only_space", 0x05a69): (160, "StoreHandler0Map"),
+ ("read_only_space", 0x05ab9): (160, "StoreHandler1Map"),
+ ("read_only_space", 0x05b09): (160, "StoreHandler2Map"),
+ ("read_only_space", 0x05b59): (160, "StoreHandler3Map"),
("map_space", 0x00139): (1057, "ExternalMap"),
("map_space", 0x00189): (1073, "JSMessageObjectMap"),
}
@@ -369,38 +355,29 @@ KNOWN_OBJECTS = {
("read_only_space", 0x00d71): "TerminationException",
("read_only_space", 0x00e19): "OptimizedOut",
("read_only_space", 0x00eb9): "StaleRegister",
- ("read_only_space", 0x026c9): "EmptyEnumCache",
- ("read_only_space", 0x02731): "EmptyPropertyArray",
- ("read_only_space", 0x02741): "EmptyByteArray",
- ("read_only_space", 0x02751): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x02769): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x027d1): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x027e1): "EmptyFixedUint8Array",
- ("read_only_space", 0x02801): "EmptyFixedInt8Array",
- ("read_only_space", 0x02821): "EmptyFixedUint16Array",
- ("read_only_space", 0x02841): "EmptyFixedInt16Array",
- ("read_only_space", 0x02861): "EmptyFixedUint32Array",
- ("read_only_space", 0x02881): "EmptyFixedInt32Array",
- ("read_only_space", 0x028a1): "EmptyFixedFloat32Array",
- ("read_only_space", 0x028c1): "EmptyFixedFloat64Array",
- ("read_only_space", 0x028e1): "EmptyFixedUint8ClampedArray",
- ("read_only_space", 0x02901): "EmptyFixedBigUint64Array",
- ("read_only_space", 0x02921): "EmptyFixedBigInt64Array",
- ("read_only_space", 0x02941): "EmptySloppyArgumentsElements",
- ("read_only_space", 0x02961): "EmptySlowElementDictionary",
- ("read_only_space", 0x029a9): "EmptyOrderedHashMap",
- ("read_only_space", 0x029d1): "EmptyOrderedHashSet",
- ("read_only_space", 0x029f9): "EmptyFeedbackMetadata",
- ("read_only_space", 0x02a09): "EmptyPropertyCell",
- ("read_only_space", 0x02a31): "EmptyPropertyDictionary",
- ("read_only_space", 0x02a81): "NoOpInterceptorInfo",
- ("read_only_space", 0x02b21): "EmptyWeakArrayList",
- ("read_only_space", 0x02b39): "InfinityValue",
- ("read_only_space", 0x02b49): "MinusZeroValue",
- ("read_only_space", 0x02b59): "MinusInfinityValue",
- ("read_only_space", 0x02b69): "SelfReferenceMarker",
- ("read_only_space", 0x02bc1): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x02bd9): "HashSeed",
+ ("read_only_space", 0x02359): "EmptyEnumCache",
+ ("read_only_space", 0x023c1): "EmptyPropertyArray",
+ ("read_only_space", 0x023d1): "EmptyByteArray",
+ ("read_only_space", 0x023e1): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x023f9): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x02461): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x02471): "EmptySloppyArgumentsElements",
+ ("read_only_space", 0x02491): "EmptySlowElementDictionary",
+ ("read_only_space", 0x024d9): "EmptyOrderedHashMap",
+ ("read_only_space", 0x02501): "EmptyOrderedHashSet",
+ ("read_only_space", 0x02529): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x02539): "EmptyPropertyCell",
+ ("read_only_space", 0x02561): "EmptyPropertyDictionary",
+ ("read_only_space", 0x025b1): "NoOpInterceptorInfo",
+ ("read_only_space", 0x02651): "EmptyWeakArrayList",
+ ("read_only_space", 0x02669): "InfinityValue",
+ ("read_only_space", 0x02679): "MinusZeroValue",
+ ("read_only_space", 0x02689): "MinusInfinityValue",
+ ("read_only_space", 0x02699): "SelfReferenceMarker",
+ ("read_only_space", 0x026f1): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x02709): "TrampolineTrivialCodeDataContainer",
+ ("read_only_space", 0x02721): "TrampolinePromiseRejectionCodeDataContainer",
+ ("read_only_space", 0x02739): "HashSeed",
("old_space", 0x00139): "ArgumentsIteratorAccessor",
("old_space", 0x001a9): "ArrayLengthAccessor",
("old_space", 0x00219): "BoundFunctionLengthAccessor",
@@ -448,6 +425,7 @@ FRAME_MARKERS = (
"JS_TO_WASM",
"WASM_INTERPRETER_ENTRY",
"C_WASM_ENTRY",
+ "WASM_EXIT",
"WASM_COMPILE_LAZY",
"INTERPRETED",
"STUB",
diff --git a/deps/v8/tools/vim/ninja-build.vim b/deps/v8/tools/vim/ninja-build.vim
index 3e9b8948ca..7c885255ce 100644
--- a/deps/v8/tools/vim/ninja-build.vim
+++ b/deps/v8/tools/vim/ninja-build.vim
@@ -53,11 +53,8 @@ def path_to_build_dir(configuration):
def compute_ninja_command_for_targets(targets='', configuration=None):
- flags = []
- if "use_goma=1" in os.getenv('GYP_DEFINES', '').split(' '):
- flags = ['-j', '512']
build_dir = path_to_build_dir(configuration);
- build_cmd = ' '.join(['ninja'] + flags + ['-C', build_dir, targets])
+ build_cmd = ' '.join(['autoninja', '-C', build_dir, targets])
vim.command('return "%s"' % build_cmd)
diff --git a/deps/v8/tools/wasm-compilation-hints/OWNERS b/deps/v8/tools/wasm-compilation-hints/OWNERS
new file mode 100644
index 0000000000..4c00a60a00
--- /dev/null
+++ b/deps/v8/tools/wasm-compilation-hints/OWNERS
@@ -0,0 +1,2 @@
+clemensh@chromium.org
+mstarzinger@chromium.org
diff --git a/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py b/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py
new file mode 100755
index 0000000000..fd4b65b8ff
--- /dev/null
+++ b/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import argparse
+import io
+import sys
+
+from wasm import *
+
+FUNCTION_SECTION_ID = 3
+
+def parse_args():
+ parser = argparse.ArgumentParser(\
+ description="Inject compilation hints into a Wasm module.")
+ parser.add_argument("-i", "--in-wasm-file", \
+ type=str, \
+ help="original wasm module")
+ parser.add_argument("-o", "--out-wasm-file", \
+ type=str, \
+ help="wasm module with injected hints")
+ parser.add_argument("-x", "--hints-file", \
+ type=str, required=True, \
+ help="binary hints file to be injected as a custom section " + \
+ "'compilationHints'")
+ return parser.parse_args()
+
+if __name__ == "__main__":
+ args = parse_args()
+ in_wasm_file = args.in_wasm_file if args.in_wasm_file else sys.stdin.fileno()
+ out_wasm_file = args.out_wasm_file if args.out_wasm_file else sys.stdout.fileno()
+ hints_bs = open(args.hints_file, "rb").read()
+ with io.open(in_wasm_file, "rb") as fin:
+ with io.open(out_wasm_file, "wb") as fout:
+ magic_number, bs = read_magic_number(fin)
+ fout.write(bs)
+ version, bs = read_version(fin)
+ fout.write(bs)
+ num_declared_functions = None
+ while True:
+ id, bs = read_varuintN(fin)
+ fout.write(bs)
+ if id is None:
+ break
+ payload_length, bs = read_varuintN(fin)
+ fout.write(bs)
+
+ # Peek into function section for upcoming validity check.
+ if id == FUNCTION_SECTION_ID:
+ num_declared_functions, bs = peek_varuintN(fin)
+
+ bs = fin.read(payload_length)
+ fout.write(bs)
+
+ # Insert hint section after function section.
+ if id == FUNCTION_SECTION_ID:
+ assert len(hints_bs) == num_declared_functions, "unexpected number of hints"
+ write_compilation_hints_section(fout, hints_bs)
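
A minimal usage sketch (not part of the commit) for the injection script above. The hints file simply carries one hint byte per declared function; the file names and hint byte values below are placeholders chosen for illustration:

    # hypothetical hints file: one compilation-hint byte per declared function
    with open("hints.bin", "wb") as f:
        f.write(b"\x01\x02\x01")   # a module with three declared functions
    # then, roughly: inject-compilation-hints.py -i in.wasm -x hints.bin -o out.wasm

The script asserts that the number of hint bytes matches the module's declared function count and writes the 'compilationHints' custom section directly after the function section.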
diff --git a/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py b/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py
new file mode 100755
index 0000000000..a762bd78a6
--- /dev/null
+++ b/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import argparse
+import io
+import sys
+
+from wasm import *
+
+def parse_args():
+ parser = argparse.ArgumentParser(\
+ description="Read compilation hints from Wasm module.")
+ parser.add_argument("in_wasm_file", \
+ type=str, \
+ help="wasm module")
+ return parser.parse_args()
+
+if __name__ == "__main__":
+ args = parse_args()
+ in_wasm_file = args.in_wasm_file if args.in_wasm_file else sys.stdin.fileno()
+ with io.open(in_wasm_file, "rb") as fin:
+ read_magic_number(fin)
+ read_version(fin)
+ while True:
+ id, bs = read_varuintN(fin)
+ if id is None:
+ break
+ payload_length, bs = read_varuintN(fin)
+ if id == CUSTOM_SECTION_ID:
+ section_name_length, section_name_length_bs = read_varuintN(fin)
+ section_name_bs = fin.read(section_name_length)
+ if section_name_bs == "compilationHints":
+ num_hints, bs = read_varuintN(fin)
+ print "Custom section compilationHints with", num_hints, "hints:"
+ for i in range(num_hints):
+ hint, bs = read_uint8(fin)
+ print i, hex(hint)
+ else:
+ remaining_length = payload_length \
+ - len(section_name_length_bs) \
+ - len(section_name_bs)
+ fin.read(remaining_length)  # skip the rest of this custom section
+ else:
+ fin.read(payload_length)
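
Assuming the hints file sketched earlier was injected into a module with three declared functions, this dump script's output would look roughly like the following (values are illustrative, derived from the print statements above):

    Custom section compilationHints with 3 hints:
    0 0x1
    1 0x2
    2 0x1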
diff --git a/deps/v8/tools/wasm-compilation-hints/wasm.py b/deps/v8/tools/wasm-compilation-hints/wasm.py
new file mode 100644
index 0000000000..ae3d0841e8
--- /dev/null
+++ b/deps/v8/tools/wasm-compilation-hints/wasm.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import io
+import math
+import struct
+import sys
+
+CUSTOM_SECTION_ID = 0
+FUNCTION_SECTION_ID = 3
+
+def peek_uint8(fin):
+ bs = fin.peek(1)[:1]
+ if len(bs) != 1:
+ return None, bs
+ return ord(bs[0]), bs
+
+def read_uint8(fin):
+ value, bs = peek_uint8(fin)
+ fin.read(len(bs))
+ return value, bs
+
+def peek_uint32(fin):
+ bs = fin.peek(4)[:4]
+ if len(bs) != 4:
+ return None, bs
+ return ord(bs[0]) | ord(bs[1]) << 8 | ord(bs[2]) << 16 | ord(bs[3]) << 24, bs
+
+def read_uint32(fin):
+ value, bs = peek_uint32(fin)
+ fin.read(len(bs))
+ return value, bs
+
+def peek_varuintN(fin):
+ value = 0
+ shift = 0
+ n = 1
+ while True:
+ bs = fin.peek(n)[:n]
+ if len(bs) < n:
+ return None, bs
+ b = ord(bs[-1])
+ value |= (b & 0x7F) << shift
+ if (b & 0x80) == 0x00:
+ return value, bs
+ shift += 7
+ n += 1
+
+def read_varuintN(fin):
+ value, bs = peek_varuintN(fin)
+ fin.read(len(bs))
+ return value, bs
+
+def to_varuintN(value):
+ bs = ""
+ while True:
+ b = value & 0x7F
+ value >>= 7
+ if (value != 0x00):
+ b |= 0x80
+ bs += chr(b)
+ if value == 0x00:
+ return bs
+
+def write_varuintN(value, fout):
+ bs = to_varuintN(value)
+ fout.write(bs)
+ return bs
+
+def peek_magic_number(fin, expected_magic_number=0x6d736100):
+ magic_number, bs = peek_uint32(fin)
+ assert magic_number == expected_magic_number, "unexpected magic number"
+ return magic_number, bs
+
+def read_magic_number(fin, expected_magic_number=0x6d736100):
+ magic_number, bs = peek_magic_number(fin, expected_magic_number)
+ fin.read(len(bs))
+ return magic_number, bs
+
+def peek_version(fin, expected_version=1):
+ version, bs = peek_uint32(fin)
+ assert version == expected_version, "unexpected version"
+ return version, bs
+
+def read_version(fin, expected_version=1):
+ version, bs = peek_version(fin, expected_version)
+ fin.read(len(bs))
+ return version, bs
+
+def write_custom_section(fout, section_name_bs, payload_bs):
+ section_name_length_bs = to_varuintN(len(section_name_bs))
+ payload_length_bs = to_varuintN(len(section_name_bs) \
+ + len(section_name_length_bs) + len(payload_bs))
+ section_id_bs = to_varuintN(CUSTOM_SECTION_ID)
+ fout.write(section_id_bs)
+ fout.write(payload_length_bs)
+ fout.write(section_name_length_bs)
+ fout.write(section_name_bs)
+ fout.write(payload_bs)
+
+def write_compilation_hints_section(fout, hints_bs):
+ num_compilation_hints_bs = to_varuintN(len(hints_bs))
+ section_name_bs = b"compilationHints"
+ payload_bs = num_compilation_hints_bs + hints_bs
+ write_custom_section(fout, section_name_bs, payload_bs)
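
A small round-trip sketch (not part of the commit) exercising the LEB128 helpers defined above; it assumes wasm.py is importable from the working directory and, like the module itself, targets Python 2:

    # encode and decode a varuint using the helpers above
    import io
    from wasm import to_varuintN, read_varuintN

    encoded = to_varuintN(300)                    # two bytes: "\xac\x02"
    fin = io.BufferedReader(io.BytesIO(encoded))  # peek() needs a buffered reader
    value, raw = read_varuintN(fin)
    assert value == 300 and len(raw) == 2

The BufferedReader wrapper matters because the peek_* helpers rely on fin.peek(), which a plain BytesIO does not provide.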
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 1747d02022..5f663412eb 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,4 +7,4 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles....................................
+The bartender starts to shake the bottles.......