author    Michaël Zasso <targos@protonmail.com>  2018-03-07 08:54:53 +0100
committer Michaël Zasso <targos@protonmail.com>  2018-03-07 16:48:52 +0100
commit    88786fecff336342a56e6f2e7ff3b286be716e47 (patch)
tree      92e6ba5b8ac8dae1a058988d20c9d27bfa654390 /deps/v8/tools
parent    4e86f9b5ab83cbabf43839385bf383e6a7ef7d19 (diff)
deps: update V8 to 6.5.254.31
PR-URL: https://github.com/nodejs/node/pull/18453
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Yang Guo <yangguo@chromium.org>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Diffstat (limited to 'deps/v8/tools')
-rw-r--r--  deps/v8/tools/BUILD.gn | 1
-rw-r--r--  deps/v8/tools/callstats.html | 2
-rwxr-xr-x  deps/v8/tools/callstats.py | 1
-rw-r--r--  deps/v8/tools/foozzie/testdata/failure_output.txt | 2
-rwxr-xr-x  deps/v8/tools/foozzie/v8_foozzie.py | 34
-rw-r--r--  deps/v8/tools/foozzie/v8_suppressions.py | 65
-rw-r--r--  deps/v8/tools/gdbinit | 2
-rw-r--r--  deps/v8/tools/gen-postmortem-metadata.py | 4
-rw-r--r--  deps/v8/tools/heap-stats/README.md | 15
-rw-r--r--  deps/v8/tools/heap-stats/categories.js | 167
-rw-r--r--  deps/v8/tools/heap-stats/details-selection.html | 72
-rw-r--r--  deps/v8/tools/heap-stats/details-selection.js | 211
-rw-r--r--  deps/v8/tools/heap-stats/global-timeline.html | 16
-rw-r--r--  deps/v8/tools/heap-stats/global-timeline.js | 135
-rw-r--r--  deps/v8/tools/heap-stats/index.html | 88
-rw-r--r--  deps/v8/tools/heap-stats/trace-file-reader.html | 26
-rw-r--r--  deps/v8/tools/heap-stats/trace-file-reader.js | 300
-rwxr-xr-x  deps/v8/tools/js2c.py | 12
-rwxr-xr-x  deps/v8/tools/map-processor | 41
-rw-r--r--  deps/v8/tools/map-processor-driver.js | 33
-rw-r--r--  deps/v8/tools/map-processor.html | 1254
-rw-r--r--  deps/v8/tools/map-processor.js | 717
-rwxr-xr-x  deps/v8/tools/node/backport_node.py (renamed from deps/v8/tools/release/backport_node.py) | 11
-rwxr-xr-x  deps/v8/tools/node/build_gn.py | 82
-rwxr-xr-x  deps/v8/tools/node/fetch_deps.py | 96
-rwxr-xr-x  deps/v8/tools/node/node_common.py | 43
-rwxr-xr-x  deps/v8/tools/node/test_backport_node.py (renamed from deps/v8/tools/release/test_backport_node.py) | 3
-rwxr-xr-x  deps/v8/tools/node/test_update_node.py (renamed from deps/v8/tools/release/test_update_node.py) | 1
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/.gitignore (renamed from deps/v8/tools/release/testdata/node/deps/v8/.gitignore) | 0
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me (renamed from deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me) | 0
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo (renamed from deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo) | 0
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/delete_me (renamed from deps/v8/tools/release/testdata/node/deps/v8/delete_me) | 0
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h (renamed from deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h) | 0
-rw-r--r--  deps/v8/tools/node/testdata/node/deps/v8/v8_foo (renamed from deps/v8/tools/release/testdata/node/deps/v8/v8_foo) | 0
-rw-r--r--  deps/v8/tools/node/testdata/v8/.gitignore (renamed from deps/v8/tools/release/testdata/v8/.gitignore) | 0
-rw-r--r--  deps/v8/tools/node/testdata/v8/base/trace_event/common/common (renamed from deps/v8/tools/release/testdata/v8/base/trace_event/common/common) | 0
-rw-r--r--  deps/v8/tools/node/testdata/v8/baz/v8_foo (renamed from deps/v8/tools/release/testdata/v8/baz/v8_foo) | 0
-rw-r--r--  deps/v8/tools/node/testdata/v8/baz/v8_new (renamed from deps/v8/tools/release/testdata/v8/baz/v8_new) | 0
-rw-r--r--  deps/v8/tools/node/testdata/v8/new/v8_new (renamed from deps/v8/tools/release/testdata/v8/new/v8_new) | 0
-rw-r--r--  deps/v8/tools/node/testdata/v8/v8_foo (renamed from deps/v8/tools/release/testdata/v8/v8_foo) | 0
-rw-r--r--  deps/v8/tools/node/testdata/v8/v8_new (renamed from deps/v8/tools/release/testdata/v8/v8_new) | 0
-rwxr-xr-x  deps/v8/tools/node/update_node.py | 167
-rwxr-xr-x  deps/v8/tools/parse-processor | 41
-rw-r--r--  deps/v8/tools/parse-processor-driver.js | 33
-rw-r--r--  deps/v8/tools/parse-processor.html | 337
-rw-r--r--  deps/v8/tools/parse-processor.js | 918
-rwxr-xr-x  deps/v8/tools/perf-compare.py | 1
-rwxr-xr-x  deps/v8/tools/perf-to-html.py | 1
-rw-r--r--  deps/v8/tools/predictable_wrapper.py | 66
-rwxr-xr-x  deps/v8/tools/presubmit.py | 14
-rwxr-xr-x  deps/v8/tools/process-heap-prof.py | 120
-rw-r--r--  deps/v8/tools/run-num-fuzzer.isolate | 1
-rwxr-xr-x  deps/v8/tools/run_perf.py | 33
-rw-r--r--  deps/v8/tools/testrunner/PRESUBMIT.py | 8
-rw-r--r--  deps/v8/tools/testrunner/base_runner.py | 141
-rwxr-xr-x  deps/v8/tools/testrunner/deopt_fuzzer.py | 75
-rwxr-xr-x  deps/v8/tools/testrunner/gc_fuzzer.py | 113
-rw-r--r--  deps/v8/tools/testrunner/local/command.py | 171
-rw-r--r--  deps/v8/tools/testrunner/local/commands.py | 152
-rw-r--r--  deps/v8/tools/testrunner/local/execution.py | 275
-rw-r--r--  deps/v8/tools/testrunner/local/junit_output.py | 9
-rw-r--r--  deps/v8/tools/testrunner/local/perfdata.py | 17
-rw-r--r--  deps/v8/tools/testrunner/local/pool.py | 22
-rw-r--r--  deps/v8/tools/testrunner/local/progress.py | 185
-rw-r--r--  deps/v8/tools/testrunner/local/statusfile.py | 79
-rw-r--r--  deps/v8/tools/testrunner/local/testsuite.py | 328
-rwxr-xr-x  deps/v8/tools/testrunner/local/testsuite_unittest.py | 54
-rw-r--r--  deps/v8/tools/testrunner/local/utils.py | 2
-rw-r--r--  deps/v8/tools/testrunner/local/variants.py | 36
-rw-r--r--  deps/v8/tools/testrunner/local/verbose.py | 57
-rw-r--r--  deps/v8/tools/testrunner/objects/context.py | 6
-rw-r--r--  deps/v8/tools/testrunner/objects/output.py | 3
-rw-r--r--  deps/v8/tools/testrunner/objects/predictable.py | 57
-rw-r--r--  deps/v8/tools/testrunner/objects/testcase.py | 271
-rw-r--r--  deps/v8/tools/testrunner/outproc/__init__.py | 3
-rw-r--r--  deps/v8/tools/testrunner/outproc/base.py | 166
-rw-r--r--  deps/v8/tools/testrunner/outproc/message.py | 56
-rw-r--r--  deps/v8/tools/testrunner/outproc/mkgrokdump.py | 31
-rw-r--r--  deps/v8/tools/testrunner/outproc/mozilla.py | 33
-rw-r--r--  deps/v8/tools/testrunner/outproc/test262.py | 54
-rw-r--r--  deps/v8/tools/testrunner/outproc/webkit.py | 18
-rwxr-xr-x  deps/v8/tools/testrunner/standard_runner.py | 250
-rw-r--r--  deps/v8/tools/testrunner/testproc/__init__.py | 3
-rw-r--r--  deps/v8/tools/testrunner/testproc/base.py | 207
-rw-r--r--  deps/v8/tools/testrunner/testproc/execution.py | 92
-rw-r--r--  deps/v8/tools/testrunner/testproc/filter.py | 83
-rw-r--r--  deps/v8/tools/testrunner/testproc/loader.py | 27
-rw-r--r--  deps/v8/tools/testrunner/testproc/progress.py | 385
-rw-r--r--  deps/v8/tools/testrunner/testproc/rerun.py | 59
-rw-r--r--  deps/v8/tools/testrunner/testproc/result.py | 97
-rw-r--r--  deps/v8/tools/testrunner/testproc/shard.py | 30
-rw-r--r--  deps/v8/tools/testrunner/testproc/variant.py | 68
-rw-r--r--  deps/v8/tools/turbolizer/code-view.js | 2
-rw-r--r--  deps/v8/tools/turbolizer/graph-view.js | 32
-rw-r--r--  deps/v8/tools/turbolizer/index.html | 16
-rw-r--r--  deps/v8/tools/turbolizer/turbo-visualizer.css | 147
-rw-r--r--  deps/v8/tools/turbolizer/turbo-visualizer.js | 255
-rw-r--r--  deps/v8/tools/turbolizer/view.js | 12
-rw-r--r--  deps/v8/tools/unittests/PRESUBMIT.py | 9
-rwxr-xr-x  deps/v8/tools/unittests/predictable_wrapper_test.py | 57
-rwxr-xr-x [-rw-r--r--]  deps/v8/tools/unittests/run_perf_test.py | 40
-rwxr-xr-x  deps/v8/tools/unittests/run_tests_test.py | 667
-rw-r--r--  deps/v8/tools/unittests/testdata/expected_test_results1.json | 107
-rw-r--r--  deps/v8/tools/unittests/testdata/expected_test_results2.json | 74
-rw-r--r--  deps/v8/tools/unittests/testdata/predictable_mocked.py | 28
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py | 16
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status | 35
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py | 31
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json | 18
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py | 29
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status | 6
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py | 31
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json | 18
-rwxr-xr-x  deps/v8/tools/v8-rolls.sh | 120
-rw-r--r--  deps/v8/tools/v8heapconst.py | 370
-rwxr-xr-x  deps/v8/tools/wasm/update-wasm-spec-tests.sh | 29
-rw-r--r--  deps/v8/tools/whitespace.txt | 7
117 files changed, 9245 insertions, 1800 deletions
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index a15058a186..1c0864d0d8 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -50,6 +50,7 @@ v8_isolate_run("run-gcmole") {
isolate = "gcmole/run-gcmole.isolate"
}
+# TODO(machenbach): Add tests as dependencies.
v8_isolate_run("run-num-fuzzer") {
deps = [
"..:d8_run",
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index a75edf8968..2618b50b71 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -1727,6 +1727,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
Group.add('callback', new Group('Blink C++', /.*Callback.*/, "#109618"));
Group.add('api', new Group('API', /.*API.*/, "#990099"));
Group.add('gc-custom', new Group('GC-Custom', /GC_Custom_.*/, "#0099C6"));
+ Group.add('gc-background',
+ new Group('GC-Background', /.*GC.*BACKGROUND.*/, "#00597c"));
Group.add('gc', new Group('GC', /GC_.*|AllocateInTargetSpace/, "#00799c"));
Group.add('javascript', new Group('JavaScript', /JS_Execution/, "#DD4477"));
Group.add('runtime', new Group('V8 C++', /.*/, "#88BB00"));
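
One detail worth noting in this hunk and the callstats.py hunk below: the new GC-Background entry is registered before the generic GC entry. The groups appear to be tried in registration order with the first match winning (the catch-all /.*/ group comes last), so /GC_.*/ would otherwise swallow the background counters. A minimal first-match-wins sketch in Python (group names mirror the ones above; the ordering is the point, not the exact API):

    import re

    GROUPS = [
        ('GC-Background', re.compile(r'.*GC.*BACKGROUND.*')),
        ('GC', re.compile(r'GC_.*|AllocateInTargetSpace')),
        ('Runtime', re.compile(r'.*')),  # catch-all, must stay last
    ]

    def classify(name):
        # First matching group wins, so the more specific pattern
        # must be registered before the generic GC pattern.
        for group, regexp in GROUPS:
            if regexp.match(name):
                return group

    assert classify('GC_BACKGROUND_ARRAY_BUFFER_FREE') == 'GC-Background'
    assert classify('GC_MC_MARK') == 'GC'
    assert classify('JS_Execution') == 'Runtime'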
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index 1b123cb936..5215d6319f 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -356,6 +356,7 @@ def read_stats(path, domain, args):
('Group-Callback', re.compile(".*Callback.*")),
('Group-API', re.compile(".*API.*")),
('Group-GC-Custom', re.compile("GC_Custom_.*")),
+ ('Group-GC-Background', re.compile(".*GC.*BACKGROUND.*")),
('Group-GC', re.compile("GC_.*|AllocateInTargetSpace")),
('Group-JavaScript', re.compile("JS_Execution")),
('Group-Runtime', re.compile(".*"))]
diff --git a/deps/v8/tools/foozzie/testdata/failure_output.txt b/deps/v8/tools/foozzie/testdata/failure_output.txt
index 654a84fb98..85b1d7ab77 100644
--- a/deps/v8/tools/foozzie/testdata/failure_output.txt
+++ b/deps/v8/tools/foozzie/testdata/failure_output.txt
@@ -11,7 +11,7 @@
# Flags of x64,ignition:
--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --turbo-filter=~ --noopt --suppress-asm-messages
# Flags of x64,ignition_turbo:
---abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --suppress-asm-messages
+--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --suppress-asm-messages --stress-scavenge=100
#
# Difference:
- unknown
diff --git a/deps/v8/tools/foozzie/v8_foozzie.py b/deps/v8/tools/foozzie/v8_foozzie.py
index 2b61410ce4..9bb3512bcf 100755
--- a/deps/v8/tools/foozzie/v8_foozzie.py
+++ b/deps/v8/tools/foozzie/v8_foozzie.py
@@ -12,6 +12,7 @@ import hashlib
import itertools
import json
import os
+import random
import re
import sys
import traceback
@@ -55,8 +56,35 @@ CONFIGS = dict(
'--no-lazy-inner-functions',
'--suppress-asm-messages',
],
+ slow_path=[
+ '--force-slow-path',
+ '--suppress-asm-messages',
+ ],
+ slow_path_opt=[
+ '--always-opt',
+ '--force-slow-path',
+ '--suppress-asm-messages',
+ ],
+ trusted=[
+ '--no-untrusted-code-mitigations',
+ '--suppress-asm-messages',
+ ],
+ trusted_opt=[
+ '--always-opt',
+ '--no-untrusted-code-mitigations',
+ '--suppress-asm-messages',
+ ],
)
+# Additional flag experiments. List of tuples like
+# (<likelihood to use flags in [0,1)>, <flag>).
+ADDITIONAL_FLAGS = [
+ (0.1, '--stress-marking=100'),
+ (0.1, '--stress-scavenge=100'),
+ (0.1, '--stress-compaction-random'),
+ (0.1, '--random-gc-interval=2000'),
+]
+
# Timeout in seconds for one d8 run.
TIMEOUT = 3
@@ -229,6 +257,7 @@ def fail_bailout(output, ignore_by_output_fun):
def main():
options = parse_args()
+ rng = random.Random(options.random_seed)
# Suppressions are architecture and configuration specific.
suppress = v8_suppressions.get_suppression(
@@ -249,6 +278,11 @@ def main():
first_config_flags = common_flags + CONFIGS[options.first_config]
second_config_flags = common_flags + CONFIGS[options.second_config]
+ # Add additional flags to second config based on experiment percentages.
+ for p, flag in ADDITIONAL_FLAGS:
+ if rng.random() < p:
+ second_config_flags.append(flag)
+
def run_d8(d8, config_flags):
preamble = PREAMBLE[:]
if options.first_arch != options.second_arch:
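
The experiment logic added to v8_foozzie.py amounts to an independent, seeded coin flip per flag. A minimal sketch of the same idea, assuming a helper name pick_extra_flags that does not exist in the script itself:

    import random

    # Mirrors ADDITIONAL_FLAGS above: (probability of use, flag).
    ADDITIONAL_FLAGS = [
        (0.1, '--stress-marking=100'),
        (0.1, '--stress-scavenge=100'),
        (0.1, '--stress-compaction-random'),
        (0.1, '--random-gc-interval=2000'),
    ]

    def pick_extra_flags(seed):
        # Seeding the RNG from the fuzzer's random seed keeps a
        # failing flag combination reproducible.
        rng = random.Random(seed)
        return [flag for p, flag in ADDITIONAL_FLAGS if rng.random() < p]

    print(pick_extra_flags(12345))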
diff --git a/deps/v8/tools/foozzie/v8_suppressions.py b/deps/v8/tools/foozzie/v8_suppressions.py
index 42fdc7a241..87b1972e94 100644
--- a/deps/v8/tools/foozzie/v8_suppressions.py
+++ b/deps/v8/tools/foozzie/v8_suppressions.py
@@ -46,11 +46,6 @@ IGNORE_SOURCES = {
'/v8/test/mjsunit/regress/regress-2989.js',
],
- 'crbug.com/718739': [
- '/v8/test/mjsunit/regress/regress-105.js',
- '/v8/test/mjsunit/regress/regress-crbug-599714.js',
- ],
-
'crbug.com/688159': [
'/v8/test/mjsunit/es7/exponentiation-operator.js',
],
@@ -70,17 +65,25 @@ IGNORE_SOURCES = {
],
}
-# Ignore by test case pattern. Map from bug->regexp.
+# Ignore by test case pattern. Map from config->bug->regexp. Config '' is used
+# to match all configurations. Otherwise use either a compiler configuration,
+# e.g. ignition or validate_asm or an architecture, e.g. x64 or ia32.
+# Bug is preferred to be a crbug.com/XYZ, but can be any short distinguishable
+# label.
# Regular expressions are assumed to be compiled. We use regexp.search.
IGNORE_TEST_CASES = {
- 'crbug.com/718739': re.compile(r'\.caller'),
+ 'slow_path': {
+ 'crbug.com/800651':
+ re.compile(r'async', re.S),
+ },
+ 'slow_path_opt': {
+ 'crbug.com/800651':
+ re.compile(r'async', re.S),
+ },
}
-# Ignore by output pattern. Map from config->bug->regexp. Config '' is used
-# to match all configurations. Otherwise use either a compiler configuration,
-# e.g. fullcode or validate_asm or an architecture, e.g. x64 or ia32 or a
-# comma-separated combination, e.g. x64,fullcode, for more specific
-# suppressions.
+# Ignore by output pattern. Map from config->bug->regexp. See IGNORE_TEST_CASES
+# on how to specify config keys.
# Bug is preferred to be a crbug.com/XYZ, but can be any short distinguishable
# label.
# Regular expressions are assumed to be compiled. We use regexp.search.
@@ -250,16 +253,16 @@ class Suppression(object):
return None
def ignore_by_metadata(self, metadata):
- return False
+ return None
def ignore_by_content(self, testcase):
- return False
+ return None
def ignore_by_output1(self, output):
- return False
+ return None
def ignore_by_output2(self, output):
- return False
+ return None
class V8Suppression(Suppression):
@@ -282,23 +285,25 @@ class V8Suppression(Suppression):
# Strip off test case preamble.
try:
lines = testcase.splitlines()
- lines = lines[lines.index('print("js-mutation: start generated test case");'):]
+ lines = lines[lines.index(
+ 'print("js-mutation: start generated test case");'):]
content = '\n'.join(lines)
except ValueError:
# Search the whole test case if preamble can't be found. E.g. older
# already minimized test cases might have dropped the delimiter line.
content = testcase
- for bug, exp in IGNORE_TEST_CASES.iteritems():
- if exp.search(content):
- return bug
- return False
+ for key in ['', self.arch1, self.arch2, self.config1, self.config2]:
+ for bug, exp in IGNORE_TEST_CASES.get(key, {}).iteritems():
+ if exp.search(content):
+ return bug
+ return None
def ignore_by_metadata(self, metadata):
for bug, sources in IGNORE_SOURCES.iteritems():
for source in sources:
if source in metadata['sources']:
return bug
- return False
+ return None
def ignore_by_output1(self, output):
return self.ignore_by_output(output, self.arch1, self.config1)
@@ -312,16 +317,8 @@ class V8Suppression(Suppression):
if exp.search(output):
return bug
return None
- bug = check(IGNORE_OUTPUT.get('', {}))
- if bug:
- return bug
- bug = check(IGNORE_OUTPUT.get(arch, {}))
- if bug:
- return bug
- bug = check(IGNORE_OUTPUT.get(config, {}))
- if bug:
- return bug
- bug = check(IGNORE_OUTPUT.get('%s,%s' % (arch, config), {}))
- if bug:
- return bug
+ for key in ['', arch, config]:
+ bug = check(IGNORE_OUTPUT.get(key, {}))
+ if bug:
+ return bug
return None
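
With the suppression tables now uniformly keyed by configuration, the four copy-pasted checks collapse into one loop over candidate keys. A Python 3 sketch of the lookup pattern (the real code is Python 2 and uses iteritems(); the table contents here are made up):

    import re

    IGNORE_OUTPUT = {
        '': {'crbug.com/000000': re.compile(r'some flaky pattern')},
        'x64': {'crbug.com/111111': re.compile(r'x64-only pattern')},
    }

    def ignore_by_output(output, arch, config):
        # '' matches every configuration; first hit wins.
        for key in ['', arch, config]:
            for bug, exp in IGNORE_OUTPUT.get(key, {}).items():
                if exp.search(output):
                    return bug
        return None

    print(ignore_by_output('some flaky pattern here', 'x64', 'ignition'))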
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index 03ecfdda30..fa9f434fb3 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -13,7 +13,7 @@ end
# Print v8::Local handle value.
define jlh
-call _v8_internal_Print_Object(*(v8::internal::Object**)(*$arg0))
+call _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).val_))
end
document jlh
Print content of a v8::Local handle
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index a618d74ed3..043ecc306d 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -164,8 +164,8 @@ consts_misc = [
'value': 'Map::ElementsKindBits::kMask' },
{ 'name': 'bit_field2_elements_kind_shift',
'value': 'Map::ElementsKindBits::kShift' },
- { 'name': 'bit_field3_dictionary_map_shift',
- 'value': 'Map::DictionaryMap::kShift' },
+ { 'name': 'bit_field3_is_dictionary_map_shift',
+ 'value': 'Map::IsDictionaryMapBit::kShift' },
{ 'name': 'bit_field3_number_of_own_descriptors_mask',
'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
{ 'name': 'bit_field3_number_of_own_descriptors_shift',
diff --git a/deps/v8/tools/heap-stats/README.md b/deps/v8/tools/heap-stats/README.md
new file mode 100644
index 0000000000..70083fe257
--- /dev/null
+++ b/deps/v8/tools/heap-stats/README.md
@@ -0,0 +1,15 @@
+# Heap Stats
+
+Heap stats is an HTML-based tool for visualizing V8-internal object statistics.
+For example, the tool can be used to visualize how much heap memory is used for
+maintaining internal state versus actually allocated by the user.
+
+The tool consumes log files produced by d8 (or Chromium) by passing
+`--trace-gc-object-stats`, or a trace captured using Chrome's tracing
+infrastructure. Note that Chrome trace files need to be unpacked before
+they can be used.
+
+Hosting requires a web server, e.g.:
+
+ cd tools/heap-stats
+ python -m SimpleHTTPServer 8000
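
For reference, the Python 3 equivalent of the one-liner in the README above is:

    python -m http.server 8000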
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
new file mode 100644
index 0000000000..0a836d5f6c
--- /dev/null
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -0,0 +1,167 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Categories for instance types.
+const CATEGORIES = new Map([
+ [
+ 'user', new Set([
+ '*FIXED_ARRAY_CONTEXT_SUB_TYPE',
+ '*FIXED_ARRAY_COPY_ON_WRITE_SUB_TYPE',
+ '*FIXED_ARRAY_DICTIONARY_PROPERTIES_SUB_TYPE',
+ '*FIXED_ARRAY_JS_COLLECTION_SUB_TYPE',
+ '*FIXED_ARRAY_JS_WEAK_COLLECTION_SUB_TYPE',
+ '*FIXED_ARRAY_PACKED_ELEMENTS_SUB_TYPE',
+ 'CONS_ONE_BYTE_STRING_TYPE',
+ 'CONS_STRING_TYPE',
+ 'DESCRIPTOR_ARRAY_TYPE',
+ 'EXTERNAL_INTERNALIZED_STRING_TYPE',
+ 'EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
+ 'EXTERNAL_ONE_BYTE_STRING_TYPE',
+ 'EXTERNAL_STRING_TYPE',
+ 'EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE',
+ 'FIXED_DOUBLE_ARRAY_TYPE',
+ 'FIXED_FLOAT32_ARRAY_TYPE',
+ 'FIXED_FLOAT64_ARRAY_TYPE',
+ 'FIXED_INT16_ARRAY_TYPE',
+ 'FIXED_INT32_ARRAY_TYPE',
+ 'FIXED_INT8_ARRAY_TYPE',
+ 'FIXED_UINT16_ARRAY_TYPE',
+ 'FIXED_UINT32_ARRAY_TYPE',
+ 'FIXED_UINT8_ARRAY_TYPE',
+ 'FIXED_UINT8_CLAMPED_ARRAY_TYPE',
+ 'HEAP_NUMBER_TYPE',
+ 'INTERNALIZED_STRING_TYPE',
+ 'JS_ARGUMENTS_TYPE',
+ 'JS_ARRAY_BUFFER_TYPE',
+ 'JS_ARRAY_TYPE',
+ 'JS_BOUND_FUNCTION_TYPE',
+ 'JS_DATE_TYPE',
+ 'JS_ERROR_TYPE',
+ 'JS_FAST_ARRAY_KEY_ITERATOR_TYPE',
+ 'JS_FAST_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE',
+ 'JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_FUNCTION_TYPE',
+ 'JS_GENERATOR_OBJECT_TYPE',
+ 'JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_GLOBAL_OBJECT_TYPE',
+ 'JS_GLOBAL_PROXY_TYPE',
+ 'JS_MAP_KEY_VALUE_ITERATOR_TYPE',
+ 'JS_MAP_TYPE',
+ 'JS_MESSAGE_OBJECT_TYPE',
+ 'JS_OBJECT_TYPE',
+ 'JS_PROMISE_TYPE',
+ 'JS_REGEXP_TYPE',
+ 'JS_SET_TYPE',
+ 'JS_STRING_ITERATOR_TYPE',
+ 'JS_TYPED_ARRAY_TYPE',
+ 'JS_VALUE_TYPE',
+ 'JS_WEAK_MAP_TYPE',
+ 'MUTABLE_HEAP_NUMBER_TYPE',
+ 'ONE_BYTE_INTERNALIZED_STRING_TYPE',
+ 'ONE_BYTE_STRING_TYPE',
+ 'PROPERTY_ARRAY_TYPE',
+ 'SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE',
+ 'SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
+ 'SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE',
+ 'SHORT_EXTERNAL_STRING_TYPE',
+ 'SLICED_ONE_BYTE_STRING_TYPE',
+ 'SLICED_STRING_TYPE',
+ 'STRING_TYPE',
+ 'SYMBOL_TYPE',
+ 'THIN_ONE_BYTE_STRING_TYPE',
+ 'THIN_STRING_TYPE',
+ ])
+ ],
+ [
+ 'system', new Set([
+ 'ACCESS_CHECK_INFO_TYPE',
+ 'ACCESSOR_INFO_TYPE',
+ 'ACCESSOR_PAIR_TYPE',
+ 'ALLOCATION_MEMENTO_TYPE',
+ 'ALLOCATION_SITE_TYPE',
+ 'BOILERPLATE_ELEMENTS_TYPE',
+ 'BOILERPLATE_NAME_DICTIONARY_TYPE',
+ 'BOILERPLATE_PROPERTY_ARRAY_TYPE',
+ 'BYTE_ARRAY_TYPE',
+ 'CELL_TYPE',
+ 'CONTEXT_EXTENSION_TYPE',
+ '*FIXED_ARRAY_DEPENDENT_CODE_SUB_TYPE',
+ '*FIXED_ARRAY_ENUM_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_ENUM_INDICES_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_NUMBER_STRING_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_PROTOTYPE_USERS_SUB_TYPE',
+ '*FIXED_ARRAY_REGEXP_MULTIPLE_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_RETAINED_MAPS_SUB_TYPE',
+ '*FIXED_ARRAY_SCOPE_INFO_SUB_TYPE',
+ '*FIXED_ARRAY_SCRIPT_LIST_SUB_TYPE',
+ '*FIXED_ARRAY_SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_STRING_SPLIT_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_TEMPLATE_INFO_SUB_TYPE',
+ '*FIXED_ARRAY_WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE',
+ 'FOREIGN_TYPE',
+ 'FUNCTION_TEMPLATE_INFO_TYPE',
+ 'INTERCEPTOR_INFO_TYPE',
+ 'JS_API_OBJECT_TYPE',
+ 'JS_ARRAY_BOILERPLATE_TYPE',
+ 'JS_OBJECT_BOILERPLATE_TYPE',
+ 'JS_SPECIAL_API_OBJECT_TYPE',
+ 'MAP_TYPE',
+ 'OBJECT_TEMPLATE_INFO_TYPE',
+ 'ODDBALL_TYPE',
+ 'PROMISE_REACTION_JOB_INFO_TYPE',
+ 'PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE',
+ 'PROPERTY_CELL_TYPE',
+ 'PROTOTYPE_INFO_TYPE',
+ 'STACK_FRAME_INFO_TYPE',
+ 'TRANSITION_ARRAY_TYPE',
+ 'WEAK_CELL_TYPE',
+ ])
+ ],
+ [
+ 'code', new Set([
+ '*CODE_BUILTIN',
+ '*CODE_BYTECODE_HANDLER',
+ '*CODE_OPTIMIZED_FUNCTION',
+ '*CODE_REGEXP',
+ '*CODE_STUB',
+ '*FIXED_ARRAY_BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE',
+ '*FIXED_ARRAY_BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE',
+ '*FIXED_ARRAY_CODE_STUBS_TABLE_SUB_TYPE',
+ '*FIXED_ARRAY_COMPILATION_CACHE_TABLE_SUB_TYPE',
+ '*FIXED_ARRAY_DEOPTIMIZATION_DATA_SUB_TYPE',
+ '*FIXED_ARRAY_EMBEDDED_OBJECT_SUB_TYPE',
+ '*FIXED_ARRAY_HANDLER_TABLE_SUB_TYPE',
+ '*FIXED_ARRAY_NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE',
+ '*FIXED_ARRAY_OPTIMIZED_CODE_LITERALS_SUB_TYPE',
+ '*FIXED_ARRAY_SHARED_FUNCTION_INFOS_SUB_TYPE',
+ 'BYTECODE_ARRAY_TYPE',
+ 'CODE_DATA_CONTAINER_TYPE',
+ 'FEEDBACK_VECTOR_TYPE',
+ 'LOAD_HANDLER_TYPE',
+ 'SCRIPT_TYPE',
+ 'SHARED_FUNCTION_INFO_TYPE',
+ 'STORE_HANDLER_TYPE',
+ ])
+ ],
+ ['unclassified', new Set()],
+]);
+
+// Maps category to description text that is shown in html.
+const CATEGORY_NAMES = new Map([
+ ['user', 'JS'],
+ ['system', 'Metadata'],
+ ['code', 'Code'],
+ ['unclassified', 'Unclassified'],
+]);
+
+// Instance types that are constructed from their sub types and
+// should thus be hidden.
+const IGNORED_INSTANCE_TYPES = new Set([
+ 'FIXED_ARRAY_TYPE',
+ 'CODE_TYPE',
+]);
diff --git a/deps/v8/tools/heap-stats/details-selection.html b/deps/v8/tools/heap-stats/details-selection.html
new file mode 100644
index 0000000000..d60aef9669
--- /dev/null
+++ b/deps/v8/tools/heap-stats/details-selection.html
@@ -0,0 +1,72 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<template id="details-selection-template">
+<style>
+.box {
+ border-left: dashed 1px #666666;
+ border-right: dashed 1px #666666;
+ border-bottom: dashed 1px #666666;
+ padding: 10px;
+ overflow: hidden;
+}
+
+.box:nth-of-type(1) {
+ border-top: dashed 1px #666666;
+ border-radius: 5px 5px 0px 0px;
+}
+
+.box:last-of-type {
+ border-radius: 0px 0px 5px 5px;
+}
+
+span {
+ display: block;
+ padding: 5px;
+ font-weight: bold;
+}
+
+.boxDiv {
+ padding: 3px;
+ float: left;
+}
+
+.boxDiv > label {
+ font-size: xx-small;
+}
+
+#categories {
+ margin-top: 10px;
+}
+</style>
+<h2>Data selection</h2>
+<ul>
+ <li>
+ <label for="isolate-select">
+ Isolate
+ </label>
+ <select id="isolate-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <label for="dataset-select">
+ Data set
+ </label>
+ <select id="dataset-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <input type="checkbox" id="merge-categories" checked=checked />
+ <label for="merge-categories">
+ Merge categories
+ </label>
+ </li>
+</ul>
+
+
+<div id="categories"></div>
+</template>
+<script type="text/javascript" src="categories.js"></script>
+<script type="text/javascript" src="details-selection.js"></script> \ No newline at end of file
diff --git a/deps/v8/tools/heap-stats/details-selection.js b/deps/v8/tools/heap-stats/details-selection.js
new file mode 100644
index 0000000000..43c000d3f4
--- /dev/null
+++ b/deps/v8/tools/heap-stats/details-selection.js
@@ -0,0 +1,211 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+const details_selection_template =
+ document.currentScript.ownerDocument.querySelector(
+ '#details-selection-template');
+
+class DetailsSelection extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.appendChild(details_selection_template.content.cloneNode(true));
+ this.isolateSelect.addEventListener(
+ 'change', e => this.handleIsolateChange(e));
+ this.datasetSelect.addEventListener(
+ 'change', e => this.notifySelectionChanged(e));
+ this.$('#merge-categories')
+ .addEventListener('change', e => this.notifySelectionChanged(e));
+ }
+
+ connectedCallback() {
+ for (let category of CATEGORIES.keys()) {
+ this.$('#categories').appendChild(this.buildCategory(category));
+ }
+ }
+
+ set data(value) {
+ this._data = value;
+ this.dataChanged();
+ }
+
+ get data() {
+ return this._data;
+ }
+
+ buildCategory(name) {
+ const div = document.createElement('div');
+ div.id = name;
+ div.classList.add('box');
+ const span = document.createElement('span');
+ div.appendChild(span);
+ span.innerHTML = CATEGORY_NAMES.get(name) + ' ';
+ const all_button = document.createElement('button');
+ span.appendChild(all_button);
+ all_button.innerHTML = 'All';
+ all_button.addEventListener('click', e => this.selectCategory(name));
+ const none_button = document.createElement('button');
+ span.appendChild(none_button);
+ none_button.innerHTML = 'None';
+ none_button.addEventListener('click', e => this.unselectCategory(name));
+ const innerDiv = document.createElement('div');
+ div.appendChild(innerDiv);
+ innerDiv.id = name + 'Content';
+ return div;
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ get datasetSelect() {
+ return this.$('#dataset-select');
+ }
+
+ get isolateSelect() {
+ return this.$('#isolate-select');
+ }
+
+ dataChanged() {
+ this.clearUI();
+ this.populateSelect('#isolate-select', Object.keys(this.data));
+ this.handleIsolateChange();
+ }
+
+ clearUI() {
+ this.selection = {categories: {}};
+ removeAllChildren(this.isolateSelect);
+ removeAllChildren(this.datasetSelect);
+ this.clearCategories();
+ }
+
+ handleIsolateChange(e) {
+ this.selection.isolate = this.isolateSelect.value;
+ if (this.selection.isolate.length === 0) {
+ this.selection.isolate = null;
+ return;
+ }
+
+ this.populateSelect(
+ '#dataset-select', this.data[this.selection.isolate].data_sets, 'live');
+ this.populateCategories();
+ this.notifySelectionChanged();
+ }
+
+ notifySelectionChanged(e) {
+ if (!this.selection.isolate) return;
+
+ this.selection.categories = {};
+ for (let category of CATEGORIES.keys()) {
+ const selected = this.selectedInCategory(category);
+ if (selected.length > 0) this.selection.categories[category] = selected;
+ }
+ this.selection.category_names = CATEGORY_NAMES;
+ this.selection.data_set = this.datasetSelect.value;
+ this.selection.merge_categories = this.$('#merge-categories').checked;
+ this.dispatchEvent(new CustomEvent(
+ 'change', {bubbles: true, composed: true, detail: this.selection}));
+ }
+
+ selectedInCategory(category) {
+ const selected = this.shadowRoot.querySelectorAll(
+ 'input[name=' + category + 'Checkbox]:checked');
+ var tmp = [];
+ for (var val of selected.values()) tmp.push(val.value);
+ return tmp;
+ }
+
+ categoryForType(instance_type) {
+ for (let [key, value] of CATEGORIES.entries()) {
+ if (value.has(instance_type)) return key;
+ }
+ return 'unclassified';
+ }
+
+ createOption(text) {
+ const option = document.createElement('option');
+ option.value = text;
+ option.text = text;
+ return option;
+ }
+
+ populateSelect(id, iterable, autoselect = null) {
+ for (let option_value of iterable) {
+ const option = this.createOption(option_value);
+ if (autoselect === option_value) {
+ option.selected = 'selected';
+ }
+ this.$(id).appendChild(option);
+ }
+ }
+
+ clearCategories() {
+ for (const category of CATEGORIES.keys()) {
+ let f = this.$('#' + category + 'Content');
+ while (f.firstChild) {
+ f.removeChild(f.firstChild);
+ }
+ }
+ }
+
+ populateCategories() {
+ this.clearCategories();
+ const categories = {};
+ for (let cat of CATEGORIES.keys()) {
+ categories[cat] = [];
+ }
+
+ for (let instance_type of this.data[this.selection.isolate]
+ .non_empty_instance_types) {
+ if (IGNORED_INSTANCE_TYPES.has(instance_type)) continue;
+ const category = this.categoryForType(instance_type);
+ categories[category].push(instance_type);
+ }
+ for (let category of Object.keys(categories)) {
+ categories[category].sort();
+ for (let instance_type of categories[category]) {
+ this.$('#' + category + 'Content')
+ .appendChild(this.createCheckBox(instance_type, category));
+ }
+ }
+ }
+
+ unselectCategory(category) {
+ for (let checkbox of this.shadowRoot.querySelectorAll(
+ 'input[name=' + category + 'Checkbox]')) {
+ checkbox.checked = false;
+ }
+ this.notifySelectionChanged();
+ }
+
+ selectCategory(category) {
+ for (let checkbox of this.shadowRoot.querySelectorAll(
+ 'input[name=' + category + 'Checkbox]')) {
+ checkbox.checked = true;
+ }
+ this.notifySelectionChanged();
+ }
+
+ createCheckBox(instance_type, category) {
+ const div = document.createElement('div');
+ div.classList.add('boxDiv');
+ const input = document.createElement('input');
+ div.appendChild(input);
+ input.type = 'checkbox';
+ input.name = category + 'Checkbox';
+ input.checked = 'checked';
+ input.id = instance_type + 'Checkbox';
+ input.value = instance_type;
+ input.addEventListener('change', e => this.notifySelectionChanged(e));
+ const label = document.createElement('label');
+ div.appendChild(label);
+ label.innerText = instance_type;
+ label.htmlFor = instance_type + 'Checkbox';
+ return div;
+ }
+}
+
+customElements.define('details-selection', DetailsSelection);
diff --git a/deps/v8/tools/heap-stats/global-timeline.html b/deps/v8/tools/heap-stats/global-timeline.html
new file mode 100644
index 0000000000..788f966735
--- /dev/null
+++ b/deps/v8/tools/heap-stats/global-timeline.html
@@ -0,0 +1,16 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<template id="global-timeline-template">
+<style>
+#chart {
+ width: 100%;
+ height: 500px;
+}
+</style>
+<div id="container" style="display: none;">
+ <h2>Timeline</h2>
+ <div id="chart"></div>
+</div>
+</template>
+<script type="text/javascript" src="global-timeline.js"></script> \ No newline at end of file
diff --git a/deps/v8/tools/heap-stats/global-timeline.js b/deps/v8/tools/heap-stats/global-timeline.js
new file mode 100644
index 0000000000..0533f21432
--- /dev/null
+++ b/deps/v8/tools/heap-stats/global-timeline.js
@@ -0,0 +1,135 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+const KB = 1024;
+const MB = KB * KB;
+
+const global_timeline_template =
+ document.currentScript.ownerDocument.querySelector(
+ '#global-timeline-template');
+
+class GlobalTimeline extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.appendChild(global_timeline_template.content.cloneNode(true));
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ set data(value) {
+ this._data = value;
+ this.stateChanged();
+ }
+
+ get data() {
+ return this._data;
+ }
+
+ set selection(value) {
+ this._selection = value;
+ this.stateChanged();
+ }
+
+ get selection() {
+ return this._selection;
+ }
+
+ isValid() {
+ return this.data && this.selection;
+ }
+
+ hide() {
+ this.$('#container').style.display = 'none';
+ }
+
+ show() {
+ this.$('#container').style.display = 'block';
+ }
+
+ stateChanged() {
+ if (this.isValid()) {
+ this.drawChart();
+ } else {
+ this.hide();
+ }
+ }
+
+ getCategoryData() {
+ const categories = Object.keys(this.selection.categories)
+ .map(k => this.selection.category_names.get(k));
+ const labels = ['Time', ...categories];
+ const chart_data = [labels];
+ const isolate_data = this.data[this.selection.isolate];
+ Object.keys(isolate_data.gcs).forEach(gc_key => {
+ const gc_data = isolate_data.gcs[gc_key];
+ const data_set = gc_data[this.selection.data_set].instance_type_data;
+ const data = [];
+ data.push(gc_data.time);
+ Object.values(this.selection.categories).forEach(instance_types => {
+ data.push(
+ instance_types
+ .map(instance_type => {
+ return data_set[instance_type].overall;
+ })
+ .reduce((accu, current) => accu + current, 0) /
+ KB);
+ });
+ chart_data.push(data);
+ });
+ return chart_data;
+ }
+
+ getInstanceTypeData() {
+ const categories = Object.keys(this.selection.categories);
+ const instance_types =
+ Object.values(this.selection.categories)
+ .reduce((accu, current) => accu.concat(current), []);
+ const labels = ['Time', ...instance_types];
+ const chart_data = [labels];
+ const isolate_data = this.data[this.selection.isolate];
+ Object.keys(isolate_data.gcs).forEach(gc_key => {
+ const gc_data = isolate_data.gcs[gc_key];
+ const data_set = gc_data[this.selection.data_set].instance_type_data;
+ const data = [];
+ data.push(gc_data.time);
+ instance_types.forEach(instance_type => {
+ data.push(data_set[instance_type].overall / KB);
+ });
+ chart_data.push(data);
+ });
+ return chart_data;
+ }
+
+ drawChart() {
+ console.assert(this.data, 'invalid data');
+ console.assert(this.selection, 'invalid selection');
+
+ const chart_data = (this.selection.merge_categories) ?
+ this.getCategoryData() :
+ this.getInstanceTypeData();
+ const data = google.visualization.arrayToDataTable(chart_data);
+ const options = {
+ isStacked: true,
+ hAxis: {
+ title: 'Time [ms]',
+ },
+ vAxis: {title: 'Memory consumption [KBytes]'},
+ chartArea: {width: '85%', height: '70%'},
+ legend: {position: 'top', maxLines: '1'},
+ pointsVisible: true,
+ pointSize: 5,
+ explorer: {},
+ };
+ const chart = new google.visualization.AreaChart(this.$('#chart'));
+ this.show();
+ chart.draw(data, google.charts.Line.convertOptions(options));
+ }
+}
+
+customElements.define('global-timeline', GlobalTimeline);
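
getCategoryData above builds one chart row per GC event: the timestamp followed by the summed 'overall' bytes of every selected instance type in each category, scaled to KB. The same fold, sketched in Python against the model produced by trace-file-reader.js (field names follow that file; this is illustrative, not part of the tool):

    KB = 1024

    def category_rows(isolate_data, categories, data_set):
        # One row per GC: [time, kb(category 1), kb(category 2), ...].
        rows = []
        for gc in isolate_data['gcs'].values():
            type_data = gc[data_set]['instance_type_data']
            row = [gc['time']]
            for instance_types in categories.values():
                total = sum(type_data[t]['overall'] for t in instance_types)
                row.append(total / KB)
            rows.append(row)
        return rows

    sample = {'gcs': {1: {'time': 10, 'live':
        {'instance_type_data': {'JS_OBJECT_TYPE': {'overall': 2048}}}}}}
    print(category_rows(sample, {'user': ['JS_OBJECT_TYPE']}, 'live'))
    # prints [[10, 2.0]]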
diff --git a/deps/v8/tools/heap-stats/index.html b/deps/v8/tools/heap-stats/index.html
new file mode 100644
index 0000000000..3c2e62b6d0
--- /dev/null
+++ b/deps/v8/tools/heap-stats/index.html
@@ -0,0 +1,88 @@
+<!DOCTYPE html>
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<html lang="en">
+
+<head>
+ <meta charset="UTF-8">
+ <title>V8 Heap Statistics</title>
+ <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet' type='text/css'>
+ <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+
+ <link rel="import" href="details-selection.html">
+ <link rel="import" href="global-timeline.html">
+ <link rel="import" href="trace-file-reader.html">
+
+ <style type="text/css">
+
+body {
+ font-family: 'Roboto', sans-serif;
+ margin-left: 5%;
+ margin-right: 5%;
+}
+
+ </style>
+ <script type="text/javascript">
+
+'use strict';
+
+google.charts.load('current', {'packages':['line', 'corechart']});
+
+function $(id) { return document.querySelector(id); }
+
+function removeAllChildren(node) {
+ while (node.firstChild) {
+ node.removeChild(node.firstChild);
+ }
+}
+
+let state = Object.create(null);
+
+function globalDataChanged(e) {
+ state.data = e.detail;
+ // Emit one entry with the whole model for debugging purposes.
+ console.log(state.data);
+ state.selection = null;
+ $('#global-timeline').selection = state.selection;
+ $('#global-timeline').data = state.data;
+ $('#type-details').selection = state.selection;
+ $('#type-details').data = state.data;
+ $('#details-selection').data = state.data;
+}
+
+function globalSelectionChangedA(e) {
+ state.selection = e.detail;
+ $('#global-timeline').selection = state.selection;
+ $('#type-details').selection = state.selection;
+}
+
+ </script>
+</head>
+
+<body>
+ <trace-file-reader onchange="globalDataChanged(event)"></trace-file-reader>
+ <h1>V8 Heap Statistics</h1>
+ <p>Visualize object statistics that have been gathered using</p>
+ <ul>
+ <li><code>--trace-gc-object-stats on V8</code></li>
+ <li>
+ <a
+ href="https://www.chromium.org/developers/how-tos/trace-event-profiling-tool">Chrome's
+ tracing infrastructure</a> collecting data for the category
+ <code>v8.gc_stats</code>. The trace file needs to be unpacked (e.g. using
+ <code>gunzip</code>).
+ </li>
+ </ul>
+ <p>
+ Note that the visualizer needs to run on a web server due to HTML imports
+ requiring <a
+ href="https://en.wikipedia.org/wiki/Cross-origin_resource_sharing">CORS</a>.
+ </p>
+ <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
+ <global-timeline id="global-timeline"></global-timeline>
+ <type-details id="type-details"></type-details>
+</body>
+
+</html>
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.html b/deps/v8/tools/heap-stats/trace-file-reader.html
new file mode 100644
index 0000000000..98c2ef0c60
--- /dev/null
+++ b/deps/v8/tools/heap-stats/trace-file-reader.html
@@ -0,0 +1,26 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<template id="trace-file-reader-template">
+<style>
+#fileReader {
+ width: 100%;
+ height: 100px;
+ line-height: 100px;
+ text-align: center;
+ border: solid 1px #000000;
+ border-radius: 5px;
+}
+
+#fileReader > input {
+ display: none;
+}
+</style>
+<div id="fileReader">
+ <span id="label">
+ Drag and drop a trace file into this area, or click to choose from disk.
+ </span>
+ <input id="file" type="file" name="file" />
+</div>
+</template>
+<script type="text/javascript" src="trace-file-reader.js"></script>
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.js b/deps/v8/tools/heap-stats/trace-file-reader.js
new file mode 100644
index 0000000000..59825fe514
--- /dev/null
+++ b/deps/v8/tools/heap-stats/trace-file-reader.js
@@ -0,0 +1,300 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+const trace_file_reader_template =
+ document.currentScript.ownerDocument.querySelector(
+ '#trace-file-reader-template');
+
+class TraceFileReader extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.appendChild(trace_file_reader_template.content.cloneNode(true));
+ this.addEventListener('click', e => this.handleClick(e));
+ this.addEventListener('dragover', e => this.handleDragOver(e));
+ this.addEventListener('drop', e => this.handleChange(e));
+ this.$('#file').addEventListener('change', e => this.handleChange(e));
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ updateLabel(text) {
+ this.$('#label').innerText = text;
+ }
+
+ handleClick(event) {
+ this.$('#file').click();
+ }
+
+ handleChange(event) {
+ // Used for drop and file change.
+ event.preventDefault();
+ var host = event.dataTransfer ? event.dataTransfer : event.target;
+ this.readFile(host.files[0]);
+ }
+
+ handleDragOver(event) {
+ event.preventDefault();
+ }
+
+ connectedCallback() {}
+
+ readFile(file) {
+ if (!file) {
+ this.updateLabel('Failed to load file.');
+ return;
+ }
+
+ const result = new FileReader();
+ result.onload = (e) => {
+ let contents = e.target.result.split('\n');
+ const return_data = (e.target.result.includes('V8.GC_Objects_Stats')) ?
+ this.createModelFromChromeTraceFile(contents) :
+ this.createModelFromV8TraceFile(contents);
+ this.updateLabel('Finished loading \'' + file.name + '\'.');
+ this.dispatchEvent(new CustomEvent(
+ 'change', {bubbles: true, composed: true, detail: return_data}));
+ };
+ result.readAsText(file);
+ }
+
+ createOrUpdateEntryIfNeeded(data, keys, entry) {
+ console.assert(entry.isolate, 'entry should have an isolate');
+ if (!(entry.isolate in keys)) {
+ keys[entry.isolate] = new Set();
+ }
+ if (!(entry.isolate in data)) {
+ data[entry.isolate] = {
+ non_empty_instance_types: new Set(),
+ gcs: {},
+ zonetags: [],
+ samples: {zone: {}},
+ start: null,
+ end: null,
+ data_sets: new Set()
+ };
+ }
+ const data_object = data[entry.isolate];
+ if (('id' in entry) && !(entry.id in data_object.gcs)) {
+ data_object.gcs[entry.id] = {non_empty_instance_types: new Set()};
+ }
+ if ('time' in entry) {
+ if (data_object.end === null || data_object.end < entry.time)
+ data_object.end = entry.time;
+ if (data_object.start === null || data_object.start > entry.time)
+ data_object.start = entry.time;
+ }
+ }
+
+ createDatasetIfNeeded(data, keys, entry, data_set) {
+ if (!(data_set in data[entry.isolate].gcs[entry.id])) {
+ data[entry.isolate].gcs[entry.id][data_set] = {
+ instance_type_data: {},
+ non_empty_instance_types: new Set(),
+ overall: 0
+ };
+ data[entry.isolate].data_sets.add(data_set);
+ }
+ }
+
+ addInstanceTypeData(
+ data, keys, isolate, gc_id, data_set, instance_type, entry) {
+ keys[isolate].add(data_set);
+ data[isolate].gcs[gc_id][data_set].instance_type_data[instance_type] = {
+ overall: entry.overall,
+ count: entry.count,
+ histogram: entry.histogram,
+ over_allocated: entry.over_allocated,
+ over_allocated_histogram: entry.over_allocated_histogram
+ };
+ data[isolate].gcs[gc_id][data_set].overall += entry.overall;
+ if (entry.overall !== 0) {
+ data[isolate].gcs[gc_id][data_set].non_empty_instance_types.add(
+ instance_type);
+ data[isolate].gcs[gc_id].non_empty_instance_types.add(instance_type);
+ data[isolate].non_empty_instance_types.add(instance_type);
+ }
+ }
+
+ extendAndSanitizeModel(data, keys) {
+ const checkNonNegativeProperty = (obj, property) => {
+ console.assert(obj[property] >= 0, 'negative property', obj, property);
+ };
+
+ for (const isolate of Object.keys(data)) {
+ for (const gc of Object.keys(data[isolate].gcs)) {
+ for (const data_set_key of keys[isolate]) {
+ const data_set = data[isolate].gcs[gc][data_set_key];
+ // 1. Create a ranked instance type array that sorts instance
+ // types by memory size (overall).
+ data_set.ranked_instance_types =
+ [...data_set.non_empty_instance_types].sort(function(a, b) {
+ if (data_set.instance_type_data[a].overall >
+ data_set.instance_type_data[b].overall) {
+ return 1;
+ } else if (
+ data_set.instance_type_data[a].overall <
+ data_set.instance_type_data[b].overall) {
+ return -1;
+ }
+ return 0;
+ });
+
+ let known_count = 0;
+ let known_overall = 0;
+ let known_histogram =
+ Array(
+ data_set.instance_type_data.FIXED_ARRAY_TYPE.histogram.length)
+ .fill(0);
+ for (const instance_type in data_set.instance_type_data) {
+ if (!instance_type.startsWith('*FIXED_ARRAY')) continue;
+ const subtype = data_set.instance_type_data[instance_type];
+ known_count += subtype.count;
+ known_overall += subtype.count;
+ for (let i = 0; i < subtype.histogram.length; i++) {
+ known_histogram[i] += subtype.histogram[i];
+ }
+ }
+
+ const fixed_array_data = data_set.instance_type_data.FIXED_ARRAY_TYPE;
+ const unknown_entry = {
+ count: fixed_array_data.count - known_count,
+ overall: fixed_array_data.overall - known_overall,
+ histogram: fixed_array_data.histogram.map(
+ (value, index) => value - known_histogram[index])
+ };
+
+ // Check for non-negative values.
+ checkNonNegativeProperty(unknown_entry, 'count');
+ checkNonNegativeProperty(unknown_entry, 'overall');
+ for (let i = 0; i < unknown_entry.histogram.length; i++) {
+ checkNonNegativeProperty(unknown_entry.histogram, i);
+ }
+
+ data_set.instance_type_data['*FIXED_ARRAY_UNKNOWN_SUB_TYPE'] =
+ unknown_entry;
+ data_set.non_empty_instance_types.add(
+ '*FIXED_ARRAY_UNKNOWN_SUB_TYPE');
+ }
+ }
+ }
+ }
+
+ createModelFromChromeTraceFile(contents) {
+ console.log('Processing log as chrome trace file.');
+ const data = Object.create(null); // Final data container.
+ const keys = Object.create(null); // Collecting 'keys' per isolate.
+
+ // Pop last line in log as it might be broken.
+ contents.pop();
+ // Remove trailing comma.
+ contents[contents.length - 1] = contents[contents.length - 1].slice(0, -1);
+ // Terminate JSON.
+ const sanitized_contents = [...contents, ']}'].join('');
+ try {
+ const raw_data = JSON.parse(sanitized_contents);
+ const objects_stats_data =
+ raw_data.traceEvents.filter(e => e.name == 'V8.GC_Objects_Stats');
+ objects_stats_data.forEach(trace_data => {
+ const actual_data = trace_data.args;
+ const data_sets = new Set(Object.keys(actual_data));
+ Object.keys(actual_data).forEach(data_set => {
+ const string_entry = actual_data[data_set];
+ try {
+ const entry = JSON.parse(string_entry);
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createDatasetIfNeeded(data, keys, entry, data_set);
+ const isolate = entry.isolate;
+ const time = entry.time;
+ const gc_id = entry.id;
+ data[isolate].gcs[gc_id].time = time;
+ data[isolate].gcs[gc_id][data_set].bucket_sizes =
+ entry.bucket_sizes;
+ for (let [instance_type, value] of Object.entries(
+ entry.type_data)) {
+ // Trace file format uses markers that do not have actual
+ // properties.
+ if (!('overall' in value)) continue;
+ this.addInstanceTypeData(
+ data, keys, isolate, gc_id, data_set, instance_type, value);
+ }
+ } catch (e) {
+ console.log('Unable to parse data set entry', e);
+ }
+ });
+ });
+ } catch (e) {
+ console.log('Unable to parse chrome trace file.', e);
+ }
+ this.extendAndSanitizeModel(data, keys);
+ return data;
+ }
+
+ createModelFromV8TraceFile(contents) {
+ console.log('Processing log as V8 trace file.');
+ contents = contents.map(function(line) {
+ try {
+ // Strip away a potentially present adb logcat prefix.
+ line = line.replace(/^I\/v8\s*\(\d+\):\s+/g, '');
+ return JSON.parse(line);
+ } catch (e) {
+ console.log('Unable to parse line: \'' + line + '\'\' (' + e + ')');
+ }
+ return null;
+ });
+
+ const data = Object.create(null); // Final data container.
+ const keys = Object.create(null); // Collecting 'keys' per isolate.
+
+ for (var entry of contents) {
+ if (entry === null || entry.type === undefined) {
+ continue;
+ }
+ if (entry.type === 'zone') {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ const stacktrace = ('stacktrace' in entry) ? entry.stacktrace : [];
+ data[entry.isolate].samples.zone[entry.time] = {
+ allocated: entry.allocated,
+ pooled: entry.pooled,
+ stacktrace: stacktrace
+ };
+ } else if (
+ entry.type === 'zonecreation' || entry.type === 'zonedestruction') {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ data[entry.isolate].zonetags.push(
+ Object.assign({opening: entry.type === 'zonecreation'}, entry));
+ } else if (entry.type === 'gc_descriptor') {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ data[entry.isolate].gcs[entry.id].time = entry.time;
+ if ('zone' in entry)
+ data[entry.isolate].gcs[entry.id].malloced = entry.zone;
+ } else if (entry.type === 'instance_type_data') {
+ if (entry.id in data[entry.isolate].gcs) {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createDatasetIfNeeded(data, keys, entry, entry.key);
+ this.addInstanceTypeData(
+ data, keys, entry.isolate, entry.id, entry.key,
+ entry.instance_type_name, entry);
+ }
+ } else if (entry.type === 'bucket_sizes') {
+ if (entry.id in data[entry.isolate].gcs) {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createDatasetIfNeeded(data, keys, entry, entry.key);
+ data[entry.isolate].gcs[entry.id][entry.key].bucket_sizes =
+ entry.sizes;
+ }
+ } else {
+ console.log('Unknown entry type: ' + entry.type);
+ }
+ }
+ this.extendAndSanitizeModel(data, keys);
+ return data;
+ }
+}
+
+customElements.define('trace-file-reader', TraceFileReader);
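
extendAndSanitizeModel synthesizes a *FIXED_ARRAY_UNKNOWN_SUB_TYPE entry: whatever part of FIXED_ARRAY_TYPE is not accounted for by the tracked *FIXED_ARRAY_* sub types, computed per count, overall bytes, and histogram bucket. A hedged Python sketch of that subtraction (it sums the sub types' 'overall' field for the byte total; the record layout follows addInstanceTypeData above):

    def unknown_fixed_array_entry(instance_type_data):
        fixed = instance_type_data['FIXED_ARRAY_TYPE']
        known_count = 0
        known_overall = 0
        known_hist = [0] * len(fixed['histogram'])
        for name, sub in instance_type_data.items():
            if not name.startswith('*FIXED_ARRAY'):
                continue
            known_count += sub['count']
            known_overall += sub['overall']
            for i, v in enumerate(sub['histogram']):
                known_hist[i] += v
        # Every field of the result should be non-negative if the
        # sub-type accounting is consistent with the parent type.
        return {
            'count': fixed['count'] - known_count,
            'overall': fixed['overall'] - known_overall,
            'histogram': [f - k for f, k in zip(fixed['histogram'], known_hist)],
        }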
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index 105be0c1b6..0107436df6 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -50,10 +50,12 @@ def ToCArray(byte_sequence):
return textwrap.fill(joined, 80)
-def RemoveCommentsAndTrailingWhitespace(lines):
+def RemoveCommentsEmptyLinesAndWhitespace(lines):
+ lines = re.sub(r'\n+', '\n', lines) # empty lines
lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
- lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
+ lines = re.sub(r'\s+\n', '\n', lines) # trailing whitespace
+ lines = re.sub(r'\n\s+', '\n', lines) # initial whitespace
return lines
@@ -342,7 +344,7 @@ def BuildFilterChain(macro_filename, message_template_file):
filter_chain.append(lambda l: ExpandConstants(l, message_templates))
filter_chain.extend([
- RemoveCommentsAndTrailingWhitespace,
+ RemoveCommentsEmptyLinesAndWhitespace,
ExpandInlineMacros,
ExpandInlineConstants,
Validate,
@@ -355,7 +357,7 @@ def BuildFilterChain(macro_filename, message_template_file):
return reduce(chain, filter_chain)
def BuildExtraFilterChain():
- return lambda x: RemoveCommentsAndTrailingWhitespace(Validate(x))
+ return lambda x: RemoveCommentsEmptyLinesAndWhitespace(Validate(x))
class Sources:
def __init__(self):
@@ -365,7 +367,7 @@ class Sources:
def IsDebuggerFile(filename):
- return "debug" in filename
+ return os.path.basename(os.path.dirname(filename)) == "debug"
def IsMacroFile(filename):
return filename.endswith("macros.py")
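
The renamed RemoveCommentsEmptyLinesAndWhitespace now also drops empty lines and leading indentation, not just comments and trailing whitespace. A standalone sketch of the same five substitutions (note that \s matches newlines, so the later whitespace passes also absorb blank lines created by comment removal):

    import re

    def minify(src):
        src = re.sub(r'\n+', '\n', src)                             # empty lines
        src = re.sub(r'//.*\n', '\n', src)                          # end-of-line comments
        src = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', src)  # block comments
        src = re.sub(r'\s+\n', '\n', src)                           # trailing whitespace
        src = re.sub(r'\n\s+', '\n', src)                           # initial whitespace
        return src

    sample = 'function f() {\n  // add one\n  return 1 + /* inline */ 0;  \n}\n\n'
    print(minify(sample))
    # prints the function with comments, blank lines and indentation stripped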
diff --git a/deps/v8/tools/map-processor b/deps/v8/tools/map-processor
new file mode 100755
index 0000000000..c0713bdf13
--- /dev/null
+++ b/deps/v8/tools/map-processor
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+# find the name of the log file to process, it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+ if ! expr "X${arg}" : "^X-" > /dev/null; then
+ log_file=${arg}
+ fi
+done
+
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+ d8_public=`which d8`
+ if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x "$d8_exec" ]; then
+ D8_PATH=`pwd`/out/native
+ d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ echo "d8 shell not found in $D8_PATH"
+ echo "To build, execute 'make native' from the V8 directory"
+ exit 1
+fi
+
+# nm spits out 'no symbols found' messages to stderr.
+cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
+ $tools_path/csvparser.js $tools_path/consarray.js \
+ $tools_path/profile.js $tools_path/profile_view.js \
+ $tools_path/logreader.js $tools_path/arguments.js \
+ $tools_path/map-processor.js $tools_path/SourceMap.js \
+ $tools_path/map-processor-driver.js -- $@ 2>/dev/null
diff --git a/deps/v8/tools/map-processor-driver.js b/deps/v8/tools/map-processor-driver.js
new file mode 100644
index 0000000000..31a4860849
--- /dev/null
+++ b/deps/v8/tools/map-processor-driver.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function processArguments(args) {
+ var processor = new ArgumentsProcessor(args);
+ if (processor.parse()) {
+ return processor.result();
+ } else {
+ processor.printUsageAndExit();
+ }
+}
+
+function initSourceMapSupport() {
+ // Pull dev tools source maps into our name space.
+ SourceMap = WebInspector.SourceMap;
+
+ // Overwrite the load function to load scripts synchronously.
+ SourceMap.load = function(sourceMapURL) {
+ var content = readFile(sourceMapURL);
+ var sourceMapObject = (JSON.parse(content));
+ return new SourceMap(sourceMapURL, sourceMapObject);
+ };
+}
+
+var params = processArguments(arguments);
+var sourceMap = null;
+if (params.sourceMap) {
+ initSourceMapSupport();
+ sourceMap = SourceMap.load(params.sourceMap);
+}
+var mapProcessor = new MapProcessor();
+mapProcessor.processLogFile(params.logFileName);
diff --git a/deps/v8/tools/map-processor.html b/deps/v8/tools/map-processor.html
new file mode 100644
index 0000000000..4029e96343
--- /dev/null
+++ b/deps/v8/tools/map-processor.html
@@ -0,0 +1,1254 @@
+<!DOCTYPE html>
+<html>
+ <!--
+ Copyright 2017 the V8 project authors. All rights reserved. Use of this source
+ code is governed by a BSD-style license that can be found in the LICENSE file.
+ -->
+<head>
+<meta charset="UTF-8">
+<style>
+html, body {
+ font-family: sans-serif;
+ padding: 0px;
+ margin: 0px;
+}
+h1, h2, h3, section {
+ padding-left: 15px;
+}
+#stats table {
+ display: inline-block;
+ padding-right: 50px;
+}
+#stats .transitionTable {
+ max-height: 200px;
+ overflow-y: scroll;
+}
+#timeline {
+ position: relative;
+ height: 300px;
+ overflow-y: hidden;
+ overflow-x: scroll;
+ user-select: none;
+}
+#timelineChunks {
+ height: 250px;
+ position: absolute;
+ margin-right: 100px;
+}
+#timelineCanvas {
+ height: 250px;
+ position: relative;
+ overflow: visible;
+ pointer-events: none;
+}
+.chunk {
+ width: 6px;
+ border: 0px white solid;
+ border-width: 0 2px 0 2px;
+ position: absolute;
+ background-size: 100% 100%;
+ image-rendering: pixelated;
+ bottom: 0px;
+}
+.timestamp {
+ height: 250px;
+ width: 100px;
+ border-left: 1px black dashed;
+ padding-left: 4px;
+ position: absolute;
+ pointer-events: none;
+ font-size: 10px;
+ opacity: 0.5;
+}
+#timelineOverview {
+ width: 100%;
+ height: 50px;
+ position: relative;
+ margin-top: -50px;
+ margin-bottom: 10px;
+ background-size: 100% 100%;
+ border: 1px black solid;
+ border-width: 1px 0 1px 0;
+ overflow: hidden;
+}
+#timelineOverviewIndicator {
+ height: 100%;
+ position: absolute;
+ box-shadow: 0px 2px 20px -5px black inset;
+ top: 0px;
+ cursor: ew-resize;
+}
+#timelineOverviewIndicator .leftMask,
+#timelineOverviewIndicator .rightMask {
+ background-color: rgba(200, 200, 200, 0.5);
+ width: 10000px;
+ height: 100%;
+ position: absolute;
+ top: 0px;
+}
+#timelineOverviewIndicator .leftMask {
+ right: 100%;
+}
+#timelineOverviewIndicator .rightMask {
+ left: 100%;
+}
+#mapDetails {
+ font-family: monospace;
+ white-space: pre;
+}
+#transitionView {
+ overflow-x: scroll;
+ white-space: nowrap;
+ min-height: 50px;
+ max-height: 200px;
+ padding: 50px 0 0 0;
+ margin-top: -25px;
+ width: 100%;
+}
+.map {
+ width: 20px;
+ height: 20px;
+ display: inline-block;
+ border-radius: 50%;
+ background-color: black;
+ border: 4px solid white;
+ font-size: 10px;
+ text-align: center;
+ line-height: 18px;
+ color: white;
+ vertical-align: top;
+ margin-top: -13px;
+ /* raise z-index */
+ position: relative;
+ z-index: 2;
+ cursor: pointer;
+}
+.map.selected {
+ border-color: black;
+}
+.transitions {
+ display: inline-block;
+ margin-left: -15px;
+}
+.transition {
+ min-height: 55px;
+ margin: 0 0 -2px 2px;
+}
+/* gray out deprecated transitions */
+.deprecated > .transitionEdge,
+.deprecated > .map {
+ opacity: 0.5;
+}
+.deprecated > .transition {
+ border-color: rgba(0, 0, 0, 0.5);
+}
+/* Show a border for all but the first transition */
+.transition:nth-of-type(2),
+.transition:nth-last-of-type(n+2) {
+ border-left: 2px solid;
+ margin-left: 0px;
+}
+/* special case for 2 transitions */
+.transition:nth-last-of-type(1) {
+ border-left: none;
+}
+/* topmost transitions are not related */
+#transitionView > .transition {
+ border-left: none;
+}
+/* topmost transition edge needs initial offset to be aligned */
+#transitionView > .transition > .transitionEdge {
+ margin-left: 13px;
+}
+.transitionEdge {
+ height: 2px;
+ width: 80px;
+ display: inline-block;
+ margin: 0 0 2px 0;
+ background-color: black;
+ vertical-align: top;
+ padding-left: 15px;
+}
+.transitionLabel {
+ color: black;
+ transform: rotate(-15deg);
+ transform-origin: top left;
+ margin-top: -10px;
+ font-size: 10px;
+ white-space: normal;
+ word-break: break-all;
+ background-color: rgba(255,255,255,0.5);
+}
+.red {
+ background-color: red;
+}
+.green {
+ background-color: green;
+}
+.yellow {
+ background-color: yellow;
+ color: black;
+}
+.blue {
+ background-color: blue;
+}
+.orange {
+ background-color: orange;
+}
+.violet {
+ background-color: violet;
+ color: black;
+}
+.showSubtransitions {
+ width: 0;
+ height: 0;
+ border-left: 6px solid transparent;
+ border-right: 6px solid transparent;
+ border-top: 10px solid black;
+ cursor: zoom-in;
+ margin: 4px 0 0 4px;
+}
+.showSubtransitions.opened {
+ border-top: none;
+ border-bottom: 10px solid black;
+ cursor: zoom-out;
+}
+#tooltip {
+ position: absolute;
+ width: 10px;
+ height: 10px;
+ background-color: red;
+ pointer-events: none;
+ z-index: 100;
+ display: none;
+}
+</style>
+<script src="./splaytree.js"></script>
+<script src="./codemap.js"></script>
+<script src="./csvparser.js"></script>
+<script src="./consarray.js"></script>
+<script src="./profile.js"></script>
+<script src="./profile_view.js"></script>
+<script src="./logreader.js"></script>
+<script src="./SourceMap.js"></script>
+<script src="./arguments.js"></script>
+<script src="./map-processor.js"></script>
+<script>
+"use strict"
+// =========================================================================
+const kChunkHeight = 250;
+const kChunkWidth = 10;
+
+class State {
+ constructor() {
+ this._nofChunks = 400;
+ this._map = undefined;
+ this._timeline = undefined;
+ this._chunks = undefined;
+ this._view = new View(this);
+ this._navigation = new Navigation(this, this.view);
+ }
+ get timeline() { return this._timeline }
+ set timeline(value) {
+ this._timeline = value;
+ this.updateChunks();
+ this.view.updateTimeline();
+ this.view.updateStats();
+ }
+ get chunks() { return this._chunks }
+ get nofChunks() { return this._nofChunks }
+ set nofChunks(count) {
+ this._nofChunks = count;
+ this.updateChunks();
+ this.view.updateTimeline();
+ }
+ get view() { return this._view }
+ get navigation() { return this._navigation }
+ get map() { return this._map }
+ set map(value) {
+ this._map = value;
+ this._navigation.updateUrl();
+ this.view.updateMapDetails();
+ this.view.redraw();
+ }
+ updateChunks() {
+ this._chunks = this._timeline.chunks(this._nofChunks);
+ }
+ get entries() {
+ if (!this.map) return {};
+ return {
+ map: this.map.id,
+ time: this.map.time
+ }
+ }
+}
+
+// =========================================================================
+// DOM Helper
+function $(id) {
+ return document.getElementById(id)
+}
+
+function removeAllChildren(node) {
+ while (node.lastChild) {
+ node.removeChild(node.lastChild);
+ }
+}
+
+function selectOption(select, match) {
+ let options = select.options;
+ for (let i = 0; i < options.length; i++) {
+ if (match(i, options[i])) {
+ select.selectedIndex = i;
+ return;
+ }
+ }
+}
+
+function div(classes) {
+ let node = document.createElement('div');
+ if (classes !== void 0) {
+ if (typeof classes == "string") {
+ node.classList.add(classes);
+ } else {
+ classes.forEach(cls => node.classList.add(cls));
+ }
+ }
+ return node;
+}
+
+function table(className) {
+ let node = document.createElement("table")
+ if (className) node.classList.add(className)
+ return node;
+}
+function td(text) {
+ let node = document.createElement("td");
+ node.innerText = text;
+ return node;
+}
+function tr() {
+ let node = document.createElement("tr");
+ return node;
+}
+
+function define(prototype, name, fn) {
+ Object.defineProperty(prototype, name, {value:fn, enumerable:false});
+}
+
+define(Array.prototype, "max", function(fn) {
+ if (this.length == 0) return undefined;
+ if (fn == undefined) fn = (each) => each;
+ let max = fn(this[0]);
+ for (let i = 1; i < this.length; i++) {
+ max = Math.max(max, fn(this[i]));
+ }
+ return max;
+})
+define(Array.prototype, "histogram", function(mapFn) {
+ let histogram = [];
+ for (let i = 0; i < this.length; i++) {
+ let value = this[i];
+ let index = Math.round(mapFn(value))
+ let bucket = histogram[index];
+ if (bucket !== undefined) {
+ bucket.push(value);
+ } else {
+ histogram[index] = [value];
+ }
+ }
+ for (let i = 0; i < histogram.length; i++) {
+ histogram[i] = histogram[i] || [];
+ }
+ return histogram;
+});
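+// Example: [0, 1, 1, 3].histogram(v => v) yields [[0], [1, 1], [], [3]];
+// holes between buckets are normalized to empty arrays.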
+
+define(Array.prototype, "first", function() { return this[0] });
+define(Array.prototype, "last", function() { return this[this.length - 1] });
+
+// =========================================================================
+// EventHandlers
+function handleBodyLoad() {
+ let upload = $('uploadInput');
+ upload.onclick = (e) => { e.target.value = null };
+ upload.onchange = (e) => { handleLoadFile(e.target) };
+ upload.focus();
+
+ document.state = new State();
+ $("transitionView").addEventListener("mousemove", e => {
+ let tooltip = $("tooltip");
+ tooltip.style.left = e.pageX + "px";
+ tooltip.style.top = e.pageY + "px";
+ let map = e.target.map;
+ if (map) {
+ $("tooltipContents").innerText = map.description.join("\n");
+ }
+ });
+}
+
+function handleLoadFile(upload) {
+ let files = upload.files;
+ let file = files[0];
+ let reader = new FileReader();
+ reader.onload = function(evt) {
+ handleLoadText(this.result);
+ }
+ reader.readAsText(file);
+}
+
+function handleLoadText(text) {
+ let mapProcessor = new MapProcessor();
+ document.state.timeline = mapProcessor.processString(text);
+}
+
+function handleKeyDown(event) {
+ let nav = document.state.navigation;
+ switch(event.key) {
+ case "ArrowUp":
+ event.preventDefault();
+ if (event.shiftKey) {
+ nav.selectPrevEdge();
+ } else {
+ nav.moveInChunk(-1);
+ }
+ return false;
+ case "ArrowDown":
+ event.preventDefault();
+ if (event.shiftKey) {
+ nav.selectNextEdge();
+ } else {
+ nav.moveInChunk(1);
+ }
+ return false;
+ case "ArrowLeft":
+ nav.moveInChunks(false);
+ break;
+ case "ArrowRight":
+ nav.moveInChunks(true);
+ break;
+ case "+":
+ nav.increaseTimelineResolution();
+ break;
+ case "-":
+ nav.decreaseTimelineResolution();
+ break;
+ }
+};
+document.onkeydown = handleKeyDown;
+
+function handleTimelineIndicatorMove(event) {
+ if (event.buttons == 0) return;
+ let timelineTotalWidth = $("timelineCanvas").offsetWidth;
+ let factor = $("timelineOverview").offsetWidth / timelineTotalWidth;
+ $("timeline").scrollLeft += event.movementX / factor;
+}
+
+// =========================================================================
+
+Object.defineProperty(Edge.prototype, 'getColor', { value:function() {
+ return transitionTypeToColor(this.type);
+}});
+
+class Navigation {
+ constructor(state, view) {
+ this.state = state;
+ this.view = view;
+ }
+ get map() { return this.state.map }
+ set map(value) { this.state.map = value }
+ get chunks() { return this.state.chunks }
+
+ increaseTimelineResolution() {
+ this.state.nofChunks *= 1.5;
+ }
+
+ decreaseTimelineResolution() {
+ this.state.nofChunks /= 1.5;
+ }
+
+ selectNextEdge() {
+ if (!this.map) return;
+ if (this.map.children.length != 1) return;
+ this.map = this.map.children[0].to;
+ }
+
+ selectPrevEdge() {
+ if (!this.map) return;
+ if (!this.map.parent()) return;
+ this.map = this.map.parent();
+ }
+
+ selectDefaultMap() {
+ this.map = this.chunks[0].at(0);
+ }
+ moveInChunks(next) {
+ if (!this.map) return this.selectDefaultMap();
+ let chunkIndex = this.map.chunkIndex(this.chunks);
+ let chunk = this.chunks[chunkIndex];
+ let index = chunk.indexOf(this.map);
+ if (next) {
+ chunk = chunk.next(this.chunks);
+ } else {
+ chunk = chunk.prev(this.chunks);
+ }
+ if (!chunk) return;
+ index = Math.min(index, chunk.size()-1);
+ this.map = chunk.at(index);
+ }
+
+ moveInChunk(delta) {
+ if (!this.map) return this.selectDefaultMap();
+ let chunkIndex = this.map.chunkIndex(this.chunks)
+ let chunk = this.chunks[chunkIndex];
+ let index = chunk.indexOf(this.map) + delta;
+ let map;
+ if (index < 0) {
+ map = chunk.prev(this.chunks).last();
+ } else if (index >= chunk.size()) {
+ map = chunk.next(this.chunks).first()
+ } else {
+ map = chunk.at(index);
+ }
+ this.map = map;
+ }
+
+ updateUrl() {
+ let entries = this.state.entries;
+ let params = new URLSearchParams(entries);
+ window.history.pushState(entries, "", "?" + params.toString());
+ }
+}
+
+class View {
+ constructor(state) {
+ this.state = state;
+ setInterval(this.updateOverviewWindow, 50);
+ this.backgroundCanvas = document.createElement("canvas");
+ this.transitionView = new TransitionView(state, $("transitionView"));
+ this.statsView = new StatsView(state, $("stats"));
+ this.isLocked = false;
+ }
+ get chunks() { return this.state.chunks }
+ get timeline() { return this.state.timeline }
+ get map() { return this.state.map }
+
+ updateStats() {
+ this.statsView.update();
+ }
+
+ updateMapDetails() {
+ let details = "";
+ if (this.map) {
+ details += "ID: " + this.map.id;
+ details += "\n" + this.map.description;
+ }
+ $("mapDetails").innerText = details;
+ this.transitionView.showMap(this.map);
+ }
+
+ updateTimeline() {
+ let chunksNode = $("timelineChunks");
+ removeAllChildren(chunksNode);
+ let chunks = this.chunks;
+ let max = chunks.max(each => each.size());
+ let start = this.timeline.startTime;
+ let end = this.timeline.endTime;
+ let duration = end - start;
+ const timeToPixel = chunks.length * kChunkWidth / duration;
+ let addTimestamp = (time, name) => {
+ let timeNode = div("timestamp");
+ timeNode.innerText = name;
+ timeNode.style.left = ((time-start) * timeToPixel) + "px";
+ chunksNode.appendChild(timeNode);
+ };
+ for (let i = 0; i < chunks.length; i++) {
+ let chunk = chunks[i];
+ let height = (chunk.size() / max * kChunkHeight);
+ chunk.height = height;
+ if (chunk.isEmpty()) continue;
+ let node = div();
+ node.className = "chunk";
+ node.style.left = (i * kChunkWidth) + "px";
+ node.style.height = height + "px";
+ node.chunk = chunk;
+ node.addEventListener("mousemove", e => this.handleChunkMouseMove(e));
+ node.addEventListener("click", e => this.handleChunkClick(e));
+ node.addEventListener("dblclick", e => this.handleChunkDoubleClick(e));
+ this.setTimelineChunkBackground(chunk, node);
+ chunksNode.appendChild(node);
+ chunk.markers.forEach(marker => addTimestamp(marker.time, marker.name));
+ }
+ // Put a time marker roughly every 20 chunks.
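+    // Round the raw interval down to a power of ten, then widen it to the
+    // nearest 1x, 2.5x or 5x step so the labels land on round numbers.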
+ let expected = duration / chunks.length * 20;
+ let interval = (10 ** Math.floor(Math.log10(expected)));
+ let correction = Math.log10(expected / interval);
+ correction = (correction < 0.33) ? 1 : (correction < 0.75) ? 2.5 : 5;
+ interval *= correction;
+
+ let time = start;
+ while (time < end) {
+ addTimestamp(time, ((time-start) / 1000) + " ms");
+ time += interval;
+ }
+ this.drawOverview();
+ this.drawHistograms();
+ this.redraw();
+ }
+
+ handleChunkMouseMove(event) {
+ if (this.isLocked) return false;
+ let chunk = event.target.chunk;
+ if (!chunk) return;
+ // topmost map (at chunk.height) == map #0.
+ let relativeIndex =
+ Math.round(event.layerY / event.target.offsetHeight * chunk.size());
+ let map = chunk.at(relativeIndex);
+ this.state.map = map;
+ }
+
+ handleChunkClick(event) {
+ this.isLocked = !this.isLocked;
+ }
+
+ handleChunkDoubleClick(event) {
+ this.isLocked = true;
+ let chunk = event.target.chunk;
+ if (!chunk) return;
+ this.transitionView.showMaps(chunk.getUniqueTransitions());
+ }
+
+ setTimelineChunkBackground(chunk, node) {
+ // Render the types of transitions as bar charts
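+    // The bars are drawn on a 1px-wide canvas that the .chunk CSS rule
+    // stretches to the full chunk size (background-size: 100% 100%).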
+ const kHeight = chunk.height;
+ const kWidth = 1;
+ this.backgroundCanvas.width = kWidth;
+ this.backgroundCanvas.height = kHeight;
+ let ctx = this.backgroundCanvas.getContext("2d");
+ ctx.clearRect(0, 0, kWidth, kHeight);
+ let y = 0;
+ let total = chunk.size();
+    chunk.getTransitionBreakdown().forEach(([type, count]) => {
+      ctx.fillStyle = transitionTypeToColor(type);
+      let height = count / total * kHeight;
+      // fillRect takes a height as its last argument, not a bottom coordinate.
+      ctx.fillRect(0, y, kWidth, height);
+      y += height;
+    });
+
+ let imageData = this.backgroundCanvas.toDataURL("image/png");
+ node.style.backgroundImage = "url(" + imageData + ")";
+ }
+
+ updateOverviewWindow() {
+ let indicator = $("timelineOverviewIndicator");
+ let totalIndicatorWidth = $("timelineOverview").offsetWidth;
+ let div = $("timeline");
+ let timelineTotalWidth = $("timelineCanvas").offsetWidth;
+    let factor = totalIndicatorWidth / timelineTotalWidth;
+ let width = div.offsetWidth * factor;
+ let left = div.scrollLeft * factor;
+ indicator.style.width = width + "px";
+ indicator.style.left = left + "px";
+ }
+
+ drawOverview() {
+ const height = 50;
+ const kFactor = 2;
+ let canvas = this.backgroundCanvas;
+ canvas.height = height;
+ canvas.width = window.innerWidth;
+ let ctx = canvas.getContext("2d");
+
+ let chunks = this.state.timeline.chunkSizes(canvas.width * kFactor);
+ let max = chunks.max();
+
+ ctx.clearRect(0, 0, canvas.width, height);
+ ctx.strokeStyle = "black";
+ ctx.fillStyle = "black";
+ ctx.beginPath();
+ ctx.moveTo(0,height);
+ for (let i = 0; i < chunks.length; i++) {
+ ctx.lineTo(i/kFactor, height - chunks[i]/max * height);
+ }
+ ctx.lineTo(chunks.length, height);
+ ctx.stroke();
+ ctx.closePath();
+ ctx.fill();
+ let imageData = canvas.toDataURL("image/png");
+ $("timelineOverview").style.backgroundImage = "url(" + imageData + ")";
+ }
+
+ drawHistograms() {
+ $("mapsDepthHistogram").histogram = this.timeline.depthHistogram();
+ $("mapsFanOutHistogram").histogram = this.timeline.fanOutHistogram();
+ }
+
+ drawMapsDepthHistogram() {
+ let canvas = $("mapsDepthCanvas");
+ let histogram = this.timeline.depthHistogram();
+ this.drawHistogram(canvas, histogram, true);
+ }
+
+ drawMapsFanOutHistogram() {
+ let canvas = $("mapsFanOutCanvas");
+ let histogram = this.timeline.fanOutHistogram();
+ this.drawHistogram(canvas, histogram, true, true);
+ }
+
+ drawHistogram(canvas, histogram, logScaleX=false, logScaleY=false) {
+ let ctx = canvas.getContext("2d");
+ let yMax = histogram.max(each => each.length);
+ if (logScaleY) yMax = Math.log(yMax);
+ let xMax = histogram.length;
+ if (logScaleX) xMax = Math.log(xMax);
+ ctx.clearRect(0, 0, canvas.width, canvas.height);
+ ctx.beginPath();
+ ctx.moveTo(0,canvas.height);
+ for (let i = 0; i < histogram.length; i++) {
+ let x = i;
+ if (logScaleX) x = Math.log(x);
+ x = x / xMax * canvas.width;
+ let bucketLength = histogram[i].length;
+ if (logScaleY) bucketLength = Math.log(bucketLength);
+ let y = (1 - bucketLength / yMax) * canvas.height;
+ ctx.lineTo(x, y);
+ }
+ ctx.lineTo(canvas.width, canvas.height);
+    ctx.closePath();
+ ctx.stroke();
+ ctx.fill();
+ }
+
+ redraw() {
+    let canvas = $("timelineCanvas");
+ canvas.width = (this.chunks.length+1) * kChunkWidth;
+ canvas.height = kChunkHeight;
+ let ctx = canvas.getContext("2d");
+ ctx.clearRect(0, 0, canvas.width, kChunkHeight);
+ if (!this.state.map) return;
+ this.drawEdges(ctx);
+ }
+
+ setMapStyle(map, ctx) {
+ ctx.fillStyle = map.edge && map.edge.from ? "black" : "green";
+ }
+
+ setEdgeStyle(edge, ctx) {
+ let color = edge.getColor();
+ ctx.strokeStyle = color;
+ ctx.fillStyle = color;
+ }
+
+ markMap(ctx, map) {
+ let [x, y] = map.position(this.state.chunks);
+ ctx.beginPath();
+ this.setMapStyle(map, ctx);
+ ctx.arc(x, y, 3, 0, 2 * Math.PI);
+ ctx.fill();
+ ctx.beginPath();
+ ctx.fillStyle = "white";
+ ctx.arc(x, y, 2, 0, 2 * Math.PI);
+ ctx.fill();
+ }
+
+ markSelectedMap(ctx, map) {
+ let [x, y] = map.position(this.state.chunks);
+ ctx.beginPath();
+ this.setMapStyle(map, ctx);
+ ctx.arc(x, y, 6, 0, 2 * Math.PI);
+ ctx.stroke();
+ }
+
+ drawEdges(ctx) {
+ // Draw the trace of maps in reverse order to make sure the outgoing
+ // transitions of previous maps aren't drawn over.
+ const kMaxOutgoingEdges = 100;
+ let nofEdges = 0;
+ let stack = [];
+ let current = this.state.map;
+ while (current && nofEdges < kMaxOutgoingEdges) {
+ nofEdges += current.children.length;
+ stack.push(current);
+ current = current.parent();
+ }
+ ctx.save();
+ this.drawOutgoingEdges(ctx, this.state.map, 3);
+ ctx.restore();
+
+ let labelOffset = 15;
+ let xPrev = 0;
+ while (current = stack.pop()) {
+ if (current.edge) {
+ this.setEdgeStyle(current.edge, ctx);
+ let [xTo, yTo] = this.drawEdge(ctx, current.edge, true, labelOffset);
+ if (xTo == xPrev) {
+ labelOffset += 8;
+ } else {
+ labelOffset = 15
+ }
+ xPrev = xTo;
+ }
+ this.markMap(ctx, current);
+ }
+ // Mark selected map
+ this.markSelectedMap(ctx, this.state.map);
+ }
+
+ drawEdge(ctx, edge, showLabel=true, labelOffset=20) {
+ if (!edge.from || !edge.to) return [-1, -1];
+ let [xFrom, yFrom] = edge.from.position(this.chunks);
+ let [xTo, yTo] = edge.to.position(this.chunks);
+ let sameChunk = xTo == xFrom;
+ if (sameChunk) labelOffset += 8;
+
+ ctx.beginPath();
+ ctx.moveTo(xFrom, yFrom);
+    let offsetX = 20;
+    let midX = xFrom + (xTo - xFrom) / 2;
+ let midY = (yFrom + yTo) / 2 - 100;
+ if (!sameChunk) {
+ ctx.quadraticCurveTo(midX, midY, xTo, yTo);
+ } else {
+ ctx.lineTo(xTo, yTo);
+ }
+ if (!showLabel) {
+ ctx.stroke();
+ } else {
+ let centerX, centerY;
+ if (!sameChunk) {
+ centerX = (xFrom/2 + midX + xTo/2)/2;
+ centerY = (yFrom/2 + midY + yTo/2)/2;
+ } else {
+ centerX = xTo;
+ centerY = yTo;
+ }
+ ctx.moveTo(centerX, centerY);
+ ctx.lineTo(centerX + offsetX, centerY - labelOffset);
+ ctx.stroke();
+ ctx.textAlign = "left";
+ ctx.fillText(edge.toString(), centerX + offsetX + 2, centerY - labelOffset)
+ }
+ return [xTo, yTo];
+ }
+
+ drawOutgoingEdges(ctx, map, max=10, depth=0) {
+ if (!map) return;
+ if (depth >= max) return;
+ ctx.globalAlpha = 0.5 - depth * (0.3/max);
+ ctx.strokeStyle = "#666";
+
+ const limit = Math.min(map.children.length, 100)
+ for (let i = 0; i < limit; i++) {
+ let edge = map.children[i];
+ this.drawEdge(ctx, edge, true);
+ this.drawOutgoingEdges(ctx, edge.to, max, depth+1);
+ }
+ }
+}
+
+
+class TransitionView {
+ constructor(state, node) {
+ this.state = state;
+ this.container = node;
+ this.currentNode = node;
+ this.currentMap = undefined;
+ }
+
+ selectMap(map) {
+ this.currentMap = map;
+ this.state.map = map;
+ }
+
+ showMap(map) {
+ if (this.currentMap === map) return;
+ this.currentMap = map;
+ this._showMaps([map]);
+ }
+
+ showMaps(list, name) {
+ this.state.view.isLocked = true;
+ this._showMaps(list);
+ }
+
+ _showMaps(list, name) {
+ // Hide the container to avoid any layouts.
+ this.container.style.display = "none";
+ removeAllChildren(this.container);
+ list.forEach(map => this.addMapAndParentTransitions(map));
+ this.container.style.display = ""
+ }
+
+ addMapAndParentTransitions(map) {
+ if (map === void 0) return;
+ this.currentNode = this.container;
+ let parents = map.getParents();
+ if (parents.length > 0) {
+ this.addTransitionTo(parents.pop());
+ parents.reverse().forEach(each => this.addTransitionTo(each));
+ }
+ let mapNode = this.addSubtransitions(map);
+ // Mark and show the selected map.
+ mapNode.classList.add("selected");
+ if (this.selectedMap == map) {
+ setTimeout(() => mapNode.scrollIntoView({
+ behavior: "smooth", block: "nearest", inline: "nearest"
+ }), 1);
+ }
+ }
+
+ addMapNode(map) {
+ let node = div("map");
+ if (map.edge) node.classList.add(map.edge.getColor());
+ node.map = map;
+ node.addEventListener("click", () => this.selectMap(map));
+ if (map.children.length > 1) {
+ node.innerText = map.children.length;
+ let showSubtree = div("showSubtransitions");
+ showSubtree.addEventListener("click", (e) => this.toggleSubtree(e, node));
+ node.appendChild(showSubtree);
+ } else if (map.children.length == 0) {
+ node.innerHTML = "&#x25CF;"
+ }
+ this.currentNode.appendChild(node);
+ return node;
+ }
+
+ addSubtransitions(map) {
+ let mapNode = this.addTransitionTo(map);
+ // Draw outgoing linear transition line.
+ let current = map;
+ while (current.children.length == 1) {
+ current = current.children[0].to;
+ this.addTransitionTo(current);
+ }
+ return mapNode;
+ }
+
+ addTransitionEdge(map) {
+ let classes = ["transitionEdge", map.edge.getColor()];
+ let edge = div(classes);
+ let labelNode = div("transitionLabel");
+ labelNode.innerText = map.edge.toString();
+ edge.appendChild(labelNode);
+ return edge;
+ }
+
+ addTransitionTo(map) {
+ // transition[ transitions[ transition[...], transition[...], ...]];
+
+ let transition = div("transition");
+ if (map.isDeprecated()) transition.classList.add("deprecated");
+ if (map.edge) {
+ transition.appendChild(this.addTransitionEdge(map));
+ }
+ let mapNode = this.addMapNode(map);
+ transition.appendChild(mapNode);
+
+ let subtree = div("transitions");
+ transition.appendChild(subtree);
+
+ this.currentNode.appendChild(transition);
+ this.currentNode = subtree;
+
+ return mapNode;
+  }
+
+ toggleSubtree(event, node) {
+ let map = node.map;
+ event.target.classList.toggle("opened");
+ let transitionsNode = node.parentElement.querySelector(".transitions");
+ let subtransitionNodes = transitionsNode.children;
+ if (subtransitionNodes.length <= 1) {
+      // Add subtransitions except the one that's already shown.
+ let visibleTransitionMap = subtransitionNodes.length == 1 ?
+ transitionsNode.querySelector(".map").map : void 0;
+ map.children.forEach(edge => {
+ if (edge.to != visibleTransitionMap) {
+ this.currentNode = transitionsNode;
+ this.addSubtransitions(edge.to);
+ }
+ });
+ } else {
+ // remove all but the first (currently selected) subtransition
+ for (let i = subtransitionNodes.length-1; i > 0; i--) {
+ transitionsNode.removeChild(subtransitionNodes[i]);
+ }
+ }
+ }
+}
+
+class StatsView {
+ constructor(state, node) {
+ this.state = state;
+ this.node = node;
+ }
+ get timeline() { return this.state.timeline }
+ get transitionView() { return this.state.view.transitionView; }
+ update() {
+ removeAllChildren(this.node);
+ this.updateGeneralStats();
+ this.updateNamedTransitionsStats();
+ }
+ updateGeneralStats() {
+ let pairs = [
+ ["Maps", e => true],
+ ["Transitions", e => e.edge && e.edge.isTransition()],
+ ["Fast to Slow", e => e.edge && e.edge.isFastToSlow()],
+ ["Slow to Fast", e => e.edge && e.edge.isSlowToFast()],
+ ["Initial Map", e => e.edge && e.edge.isInitial()],
+ ["Replace Descriptors", e => e.edge && e.edge.isReplaceDescriptors()],
+ ["Copy as Prototype", e => e.edge && e.edge.isCopyAsPrototype()],
+ ["Optimize as Prototype", e => e.edge && e.edge.isOptimizeAsPrototype()],
+ ["Deprecated", e => e.isDeprecated()],
+ ];
+
+ let text = "";
+ let tableNode = table();
+ let name, filter;
+ let total = this.timeline.size();
+ pairs.forEach(([name, filter]) => {
+ let row = tr();
+ row.maps = this.timeline.filterUniqueTransitions(filter);
+ row.addEventListener("click",
+ e => this.transitionView.showMaps(e.target.parentNode.maps));
+ row.appendChild(td(name));
+ let count = this.timeline.count(filter);
+ row.appendChild(td(count));
+ let percent = Math.round(count / total * 1000) / 10;
+ row.appendChild(td(percent + "%"));
+ tableNode.appendChild(row);
+ });
+ this.node.appendChild(tableNode);
+ };
+ updateNamedTransitionsStats() {
+ let tableNode = table("transitionTable");
+ let nameMapPairs = Array.from(this.timeline.transitions.entries());
+ nameMapPairs
+ .sort((a,b) => b[1].length - a[1].length)
+ .forEach(([name, maps]) => {
+ let row = tr();
+ row.maps = maps;
+ row.addEventListener("click",
+ e => this.transitionView.showMaps(
+ e.target.parentNode.maps.map(map => map.to)));
+ row.appendChild(td(name));
+ row.appendChild(td(maps.length));
+ tableNode.appendChild(row);
+ });
+ this.node.appendChild(tableNode);
+ }
+}
+
+// =========================================================================
+
+function transitionTypeToColor(type) {
+ switch(type) {
+ case "new": return "green";
+ case "Normalize": return "violet";
+ case "map=SlowToFast": return "orange";
+ case "InitialMap": return "yellow";
+ case "Transition": return "black";
+ case "ReplaceDescriptors": return "red";
+ }
+ return "black";
+}
+
+// ShadowDom elements =========================================================
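+// <x-histogram> draws a sparse histogram array (see Array.prototype.histogram)
+// on a canvas; the xlog/ylog attributes toggle logarithmic axis scaling.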
+customElements.define('x-histogram', class extends HTMLElement {
+ constructor() {
+ super();
+ let shadowRoot = this.attachShadow({mode: 'open'});
+ const t = document.querySelector('#x-histogram-template');
+ const instance = t.content.cloneNode(true);
+ shadowRoot.appendChild(instance);
+ this._histogram = undefined;
+ this.mouseX = 0;
+ this.mouseY = 0;
+ this.canvas.addEventListener('mousemove', event => this.handleCanvasMove(event));
+ }
+ setBoolAttribute(name, value) {
+ if (value) {
+ this.setAttribute(name, "");
+ } else {
+      this.removeAttribute(name);
+ }
+ }
+ static get observedAttributes() {
+ return ['title', 'xlog', 'ylog', 'xlabel', 'ylabel'];
+ }
+ $(query) { return this.shadowRoot.querySelector(query) }
+ get h1() { return this.$("h2") }
+ get canvas() { return this.$("canvas") }
+ get xLabelDiv() { return this.$("#xLabel") }
+ get yLabelDiv() { return this.$("#yLabel") }
+
+ get histogram() {
+ return this._histogram;
+ }
+ set histogram(array) {
+ this._histogram = array;
+ if (this._histogram) {
+ this.yMax = this._histogram.max(each => each.length);
+ this.xMax = this._histogram.length;
+ }
+ this.draw();
+ }
+
+ get title() { return this.getAttribute("title") }
+ set title(string) { this.setAttribute("title", string) }
+ get xLabel() { return this.getAttribute("xlabel") }
+ set xLabel(string) { this.setAttribute("xlabel", string)}
+ get yLabel() { return this.getAttribute("ylabel") }
+ set yLabel(string) { this.setAttribute("ylabel", string)}
+ get xLog() { return this.hasAttribute("xlog") }
+ set xLog(value) { this.setBoolAttribute("xlog", value) }
+ get yLog() { return this.hasAttribute("ylog") }
+ set yLog(value) { this.setBoolAttribute("ylog", value) }
+
+ attributeChangedCallback(name, oldValue, newValue) {
+ if (name == "title") {
+ this.h1.innerText = newValue;
+ return;
+ }
+ if (name == "ylabel") {
+ this.yLabelDiv.innerText = newValue;
+ return;
+ }
+ if (name == "xlabel") {
+ this.xLabelDiv.innerText = newValue;
+ return;
+ }
+ this.draw();
+ }
+
+ handleCanvasMove(event) {
+ this.mouseX = event.offsetX;
+ this.mouseY = event.offsetY;
+ this.draw();
+ }
+ xPosition(i) {
+ let x = i;
+ if (this.xLog) x = Math.log(x);
+ return x / this.xMax * this.canvas.width;
+ }
+ yPosition(i) {
+ let bucketLength = this.histogram[i].length;
+ if (this.yLog) {
+ return (1 - Math.log(bucketLength) / Math.log(this.yMax)) * this.drawHeight + 10;
+ } else {
+ return (1 - bucketLength / this.yMax) * this.drawHeight + 10;
+ }
+ }
+
+ get drawHeight() { return this.canvas.height - 10 }
+
+ draw() {
+ if (!this.histogram) return;
+ let width = this.canvas.width;
+ let height = this.drawHeight;
+ let ctx = this.canvas.getContext("2d");
+    // Axis maxima (this.xMax / this.yMax) are maintained by the histogram
+    // setter; xPosition/yPosition apply the log scaling.
+ ctx.clearRect(0, 0, this.canvas.width, this.canvas.height);
+ ctx.beginPath();
+ ctx.moveTo(0, height);
+ for (let i = 0; i < this.histogram.length; i++) {
+ ctx.lineTo(this.xPosition(i), this.yPosition(i));
+ }
+ ctx.lineTo(width, height);
+    ctx.closePath();
+ ctx.stroke();
+ ctx.fill();
+ if (!this.mouseX) return;
+ ctx.beginPath();
+ let index = Math.round(this.mouseX);
+ let yBucket = this.histogram[index];
+    let y = this.yPosition(index);
+ ctx.moveTo(0, y);
+ ctx.lineTo(width-40, y);
+ ctx.moveTo(this.mouseX, 0);
+ ctx.lineTo(this.mouseX, height);
+ ctx.stroke();
+ ctx.textAlign = "left";
+ ctx.fillText(yBucket.length, width-30, y);
+ }
+});
+
+</script>
+</head>
+<template id="x-histogram-template">
+ <style>
+ #yLabel {
+ transform: rotate(90deg);
+ }
+ canvas, #yLabel, #info { float: left; }
+ #xLabel { clear: both }
+ </style>
+ <h2></h2>
+ <div id="yLabel"></div>
+ <canvas height=50></canvas>
+ <div id="info">
+ </div>
+ <div id="xLabel"></div>
+</template>
+
+<body onload="handleBodyLoad(event)" onkeypress="handleKeyDown(event)">
+ <h2>Data</h2>
+ <section>
+ <form name="fileForm">
+ <p>
+ <input id="uploadInput" type="file" name="files">
+ </p>
+ </form>
+ </section>
+
+ <h2>Stats</h2>
+ <section id="stats"></section>
+
+ <h2>Timeline</h2>
+ <div id="timeline">
+ <div id=timelineChunks></div>
+ <canvas id="timelineCanvas" ></canvas>
+ </div>
+ <div id="timelineOverview"
+ onmousemove="handleTimelineIndicatorMove(event)" >
+ <div id="timelineOverviewIndicator">
+ <div class="leftMask"></div>
+ <div class="rightMask"></div>
+ </div>
+ </div>
+
+ <h2>Transitions</h2>
+ <section id="transitionView"></section>
+ <br/>
+
+ <h2>Selected Map</h2>
+ <section id="mapDetails"></section>
+
+ <x-histogram id="mapsDepthHistogram"
+ title="Maps Depth" xlabel="depth" ylabel="nof"></x-histogram>
+ <x-histogram id="mapsFanOutHistogram" xlabel="fan-out"
+ title="Maps Fan-out" ylabel="nof"></x-histogram>
+
+ <div id="tooltip">
+ <div id="tooltipContents"></div>
+ </div>
+</body>
+</html>
diff --git a/deps/v8/tools/map-processor.js b/deps/v8/tools/map-processor.js
new file mode 100644
index 0000000000..5b0e46909c
--- /dev/null
+++ b/deps/v8/tools/map-processor.js
@@ -0,0 +1,717 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ===========================================================================
+class MapProcessor extends LogReader {
+ constructor() {
+ super();
+ this.dispatchTable_ = {
+ 'code-creation': {
+ parsers: [null, parseInt, parseInt, parseInt, parseInt, null, 'var-args'],
+ processor: this.processCodeCreation
+ },
+      'code-move': {
+        parsers: [parseInt, parseInt],
+        processor: this.processCodeMove
+      },
+      'code-delete': {
+        parsers: [parseInt],
+        processor: this.processCodeDelete
+      },
+      'sfi-move': {
+        parsers: [parseInt, parseInt],
+        processor: this.processFunctionMove
+      },
+ 'map-create': {
+ parsers: [parseInt, parseInt, null],
+ processor: this.processMapCreate
+ },
+ 'map': {
+ parsers: [null, parseInt, parseInt, parseInt, parseInt, parseInt,
+ null, null, null
+ ],
+ processor: this.processMap
+ },
+ 'map-details': {
+ parsers: [parseInt, parseInt, null],
+ processor: this.processMapDetails
+ }
+ };
+ this.deserializedEntriesNames_ = [];
+ this.profile_ = new Profile();
+ this.timeline_ = new Timeline();
+ }
+
+ printError(str) {
+ console.error(str);
+ throw str
+ }
+
+ processString(string) {
+ let end = string.length;
+ let current = 0;
+ let next = 0;
+ let line;
+ let i = 0;
+ let entry;
+ try {
+ while (current < end) {
+ next = string.indexOf("\n", current);
+ if (next === -1) break;
+ i++;
+ line = string.substring(current, next);
+ current = next + 1;
+ this.processLogLine(line);
+ }
+ } catch(e) {
+ console.log("Error occurred during parsing, trying to continue: " + e);
+ }
+ return this.finalize();
+ }
+
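+  // Uses d8's readline() builtin, so this path only works in the shell
+  // version of the tool, not in map-processor.html.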
+ processLogFile(fileName) {
+    this.collectEntries = true;
+ this.lastLogFileName_ = fileName;
+ let line;
+ while (line = readline()) {
+ this.processLogLine(line);
+ }
+ return this.finalize();
+ }
+
+ finalize() {
+ // TODO(cbruni): print stats;
+ this.timeline_.finalize();
+ return this.timeline_;
+ }
+
+ addEntry(entry) {
+ this.entries.push(entry);
+ }
+
+ /**
+ * Parser for dynamic code optimization state.
+ */
+ parseState(s) {
+ switch (s) {
+ case "":
+ return Profile.CodeState.COMPILED;
+ case "~":
+ return Profile.CodeState.OPTIMIZABLE;
+ case "*":
+ return Profile.CodeState.OPTIMIZED;
+ }
+ throw new Error("unknown code state: " + s);
+ }
+
+ processCodeCreation(
+ type, kind, timestamp, start, size, name, maybe_func) {
+ name = this.deserializedEntriesNames_[start] || name;
+ if (name.startsWith("onComplete")) {
+ console.log(name);
+ }
+ if (maybe_func.length) {
+ let funcAddr = parseInt(maybe_func[0]);
+ let state = this.parseState(maybe_func[1]);
+ this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
+ } else {
+ this.profile_.addCode(type, name, timestamp, start, size);
+ }
+ }
+
+ processCodeMove(from, to) {
+ this.profile_.moveCode(from, to);
+ }
+
+ processCodeDelete(start) {
+ this.profile_.deleteCode(start);
+ }
+
+ processFunctionMove(from, to) {
+ this.profile_.moveFunc(from, to);
+ }
+
+ formatPC(pc, line, column) {
+ let entry = this.profile_.findEntry(pc);
+ if (!entry) return "<unknown>"
+ if (entry.type == "Builtin") {
+ return entry.name;
+ }
+ let name = entry.func.getName();
+ let re = /(.*):[0-9]+:[0-9]+$/;
+ let array = re.exec(name);
+ if (!array) {
+ entry = name;
+ } else {
+ entry = entry.getState() + array[1];
+ }
+ return entry + ":" + line + ":" + column;
+ }
+
+ processMap(type, time, from, to, pc, line, column, reason, name) {
+ time = parseInt(time);
+ if (type == "Deprecate") return this.deprecateMap(type, time, from);
+ from = this.getExistingMap(from, time);
+ to = this.getExistingMap(to, time);
+ let edge = new Edge(type, name, reason, time, from, to);
+ edge.filePosition = this.formatPC(pc, line, column);
+ edge.finishSetup();
+ }
+
+ deprecateMap(type, time, id) {
+ this.getExistingMap(id, time).deprecate();
+ }
+
+ processMapCreate(time, id, string) {
+    // map-create events might override existing maps if the addresses get
+    // recycled. Hence we do not check for existing maps.
+ let map = this.createMap(id, time);
+ map.description = string;
+ }
+
+ processMapDetails(time, id, string) {
+ //TODO(cbruni): fix initial map logging.
+ let map = this.getExistingMap(id, time);
+ if (!map.description) {
+ map.description = string;
+ }
+ }
+
+ createMap(id, time) {
+ let map = new V8Map(id, time);
+ this.timeline_.push(map);
+ return map;
+ }
+
+ getExistingMap(id, time) {
+ if (id === 0) return undefined;
+ let map = V8Map.get(id);
+ if (map === undefined) {
+ console.error("No map details provided: id=" + id);
+ // Manually patch in a map to continue running.
+ return this.createMap(id, time);
+    }
+ return map;
+ }
+}
+
+// ===========================================================================
+
+class V8Map {
+ constructor(id, time = -1) {
+ if (!id) throw "Invalid ID";
+ this.id = id;
+ this.time = time;
+ if (!(time > 0)) throw "Invalid time";
+ this.description = "";
+ this.edge = void 0;
+ this.children = [];
+ this.depth = 0;
+ this._isDeprecated = false;
+ this.deprecationTargets = null;
+ V8Map.set(id, this);
+ this.leftId = 0;
+ this.rightId = 0;
+ }
+
+ finalize(id) {
+ // Initialize preorder tree traversal Ids for fast subtree inclusion checks
+ if (id <= 0) throw "invalid id";
+ let currentId = id;
+    this.leftId = currentId;
+ this.children.forEach(edge => {
+ let map = edge.to;
+ currentId = map.finalize(currentId + 1);
+ });
+ this.rightId = currentId + 1;
+ return currentId + 1;
+ }
+
+ parent() {
+ if (this.edge === void 0) return void 0;
+ return this.edge.from;
+ }
+
+ isDeprecated() {
+ return this._isDeprecated;
+ }
+
+ deprecate() {
+ this._isDeprecated = true;
+ }
+
+ isRoot() {
+ return this.edge == void 0 || this.edge.from == void 0;
+ }
+
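+  // Fast subtree test: finalize() assigns preorder [leftId, rightId]
+  // intervals, and a descendant's interval nests strictly inside its
+  // ancestor's interval.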
+ contains(map) {
+ return this.leftId < map.leftId && map.rightId < this.rightId;
+ }
+
+ addEdge(edge) {
+ this.children.push(edge);
+ }
+
+ chunkIndex(chunks) {
+ // Did anybody say O(n)?
+ for (let i = 0; i < chunks.length; i++) {
+ let chunk = chunks[i];
+ if (chunk.isEmpty()) continue;
+ if (chunk.last().time < this.time) continue;
+ return i;
+ }
+ return -1;
+ }
+
+ position(chunks) {
+ let index = this.chunkIndex(chunks);
+ let xFrom = (index + 0.5) * kChunkWidth;
+ let yFrom = kChunkHeight - chunks[index].yOffset(this);
+ return [xFrom, yFrom];
+ }
+
+ transitions() {
+ let transitions = Object.create(null);
+ let current = this;
+ while (current) {
+ let edge = current.edge;
+ if (edge && edge.isTransition()) {
+ transitions[edge.name] = edge;
+ }
+ current = current.parent()
+ }
+ return transitions;
+ }
+
+ getType() {
+ return this.edge === void 0 ? "new" : this.edge.type;
+ }
+
+ getParents() {
+ let parents = [];
+ let current = this.parent();
+ while (current) {
+ parents.push(current);
+ current = current.parent();
+ }
+ return parents;
+ }
+
+ static get(id) {
+ if (!this.cache) return undefined;
+ return this.cache.get(id);
+ }
+
+ static set(id, map) {
+ if (!this.cache) this.cache = new Map();
+ this.cache.set(id, map);
+ }
+}
+
+
+// ===========================================================================
+class Edge {
+ constructor(type, name, reason, time, from, to) {
+ this.type = type;
+ this.name = name;
+ this.reason = reason;
+ this.time = time;
+ this.from = from;
+ this.to = to;
+ this.filePosition = "";
+ }
+
+ finishSetup() {
+ if (this.from) this.from.addEdge(this);
+ if (this.to) {
+ this.to.edge = this;
+ if (this.to === this.from) throw "From and to must be distinct.";
+ if (this.from) {
+ if (this.to.time < this.from.time) {
+ console.error("invalid time order");
+ }
+ let newDepth = this.from.depth + 1;
+ if (this.to.depth > 0 && this.to.depth != newDepth) {
+ console.error("Depth has already been initialized");
+ }
+ this.to.depth = newDepth;
+ }
+ }
+ }
+
+ chunkIndex(chunks) {
+ // Did anybody say O(n)?
+ for (let i = 0; i < chunks.length; i++) {
+ let chunk = chunks[i];
+ if (chunk.isEmpty()) continue;
+ if (chunk.last().time < this.time) continue;
+ return i;
+ }
+ return -1;
+ }
+
+ parentEdge() {
+ if (!this.from) return undefined;
+ return this.from.edge;
+ }
+
+ chainLength() {
+ let length = 0;
+ let prev = this;
+ while (prev) {
+      prev = prev.parentEdge();
+ length++;
+ }
+ return length;
+ }
+
+ isTransition() {
+ return this.type == "Transition"
+ }
+
+ isFastToSlow() {
+ return this.type == "Normalize"
+ }
+
+ isSlowToFast() {
+ return this.type == "SlowToFast"
+ }
+
+ isInitial() {
+ return this.type == "InitialMap"
+ }
+
+ isReplaceDescriptors() {
+ return this.type == "ReplaceDescriptors"
+ }
+
+ isCopyAsPrototype() {
+ return this.reason == "CopyAsPrototype"
+ }
+
+ isOptimizeAsPrototype() {
+ return this.reason == "OptimizeAsPrototype"
+ }
+
+ symbol() {
+ if (this.isTransition()) return "+";
+ if (this.isFastToSlow()) return "⊡";
+ if (this.isSlowToFast()) return "⊛";
+ if (this.isReplaceDescriptors()) {
+ if (this.name) return "+";
+ return "∥";
+ }
+ return "";
+ }
+
+ toString() {
+ let s = this.symbol();
+ if (this.isTransition()) return s + this.name;
+ if (this.isFastToSlow()) return s + this.reason;
+ if (this.isCopyAsPrototype()) return s + "Copy as Prototype";
+ if (this.isOptimizeAsPrototype()) {
+ return s + "Optimize as Prototype";
+ }
+ if (this.isReplaceDescriptors() && this.name) {
+ return this.type + " " + this.symbol() + this.name;
+ }
+ return this.type + " " + (this.reason ? this.reason : "") + " " +
+ (this.name ? this.name : "")
+ }
+}
+
+
+// ===========================================================================
+class Marker {
+ constructor(time, name) {
+ this.time = parseInt(time);
+ this.name = name;
+ }
+}
+
+// ===========================================================================
+class Timeline {
+ constructor() {
+ this.values = [];
+ this.transitions = new Map();
+ this.markers = [];
+ this.startTime = 0;
+ this.endTime = 0;
+ }
+
+ push(map) {
+ let time = map.time;
+ if (!this.isEmpty() && this.last().time > time) {
+      // Invalid insertion order; this can happen without --single-process,
+      // so find the right insertion point instead.
+      let insertionPoint = this.find(time);
+      this.values.splice(insertionPoint, 0, map);
+ } else {
+ this.values.push(map);
+ }
+ if (time > 0) {
+ this.endTime = Math.max(this.endTime, time);
+ if (this.startTime === 0) {
+ this.startTime = time;
+ } else {
+ this.startTime = Math.min(this.startTime, time);
+ }
+ }
+ }
+
+ addMarker(time, message) {
+ this.markers.push(new Marker(time, message));
+ }
+
+ finalize() {
+ let id = 0;
+ this.forEach(map => {
+ if (map.isRoot()) id = map.finalize(id + 1);
+ if (map.edge && map.edge.name) {
+ let edge = map.edge;
+ let list = this.transitions.get(edge.name);
+ if (list === undefined) {
+ this.transitions.set(edge.name, [edge]);
+ } else {
+ list.push(edge);
+ }
+ }
+ });
+ this.markers.sort((a, b) => b.time - a.time);
+ }
+
+ at(index) {
+ return this.values[index]
+ }
+
+ isEmpty() {
+ return this.size() == 0
+ }
+
+ size() {
+ return this.values.length
+ }
+
+ first() {
+ return this.values.first()
+ }
+
+ last() {
+ return this.values.last()
+ }
+
+ duration() {
+ return this.last().time - this.first().time
+ }
+
+ forEachChunkSize(count, fn) {
+ const increment = this.duration() / count;
+ let currentTime = this.first().time + increment;
+ let index = 0;
+ for (let i = 0; i < count; i++) {
+ let nextIndex = this.find(currentTime, index);
+ let nextTime = currentTime + increment;
+ fn(index, nextIndex, currentTime, nextTime);
+      index = nextIndex;
+ currentTime = nextTime;
+ }
+ }
+
+ chunkSizes(count) {
+ let chunks = [];
+ this.forEachChunkSize(count, (start, end) => chunks.push(end - start));
+ return chunks;
+ }
+
+ chunks(count) {
+    let chunks = [];
+ this.forEachChunkSize(count, (start, end, startTime, endTime) => {
+ let items = this.values.slice(start, end);
+ let markers = this.markersAt(startTime, endTime);
+ chunks.push(new Chunk(chunks.length, startTime, endTime, items, markers));
+ });
+ return chunks;
+ }
+
+ range(start, end) {
+ const first = this.find(start);
+ if (first < 0) return [];
+ const last = this.find(end, first);
+ return this.values.slice(first, last);
+ }
+
+ find(time, offset = 0) {
+ return this.basicFind(this.values, each => each.time - time, offset);
+ }
+
+ markersAt(startTime, endTime) {
+ let start = this.basicFind(this.markers, each => each.time - startTime);
+ let end = this.basicFind(this.markers, each => each.time - endTime, start);
+ return this.markers.slice(start, end);
+ }
+
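+  // Binary search over `array`, assumed sorted ascending with respect to
+  // cmp; returns an insertion index near the requested time.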
+ basicFind(array, cmp, offset = 0) {
+ let min = offset;
+ let max = array.length;
+ while (min < max) {
+ let mid = min + Math.floor((max - min) / 2);
+ let result = cmp(array[mid]);
+ if (result > 0) {
+ max = mid - 1;
+ } else {
+ min = mid + 1;
+ }
+ }
+ return min;
+ }
+
+ count(filter) {
+ return this.values.reduce((sum, each) => {
+ return sum + (filter(each) ? 1 : 0);
+ }, 0);
+ }
+
+ filter(predicate) {
+ return this.values.filter(predicate);
+ }
+
+ filterUniqueTransitions(filter) {
+ // Returns a list of Maps whose parent is not in the list.
+ return this.values.filter(map => {
+ if (!filter(map)) return false;
+ let parent = map.parent();
+ if (!parent) return true;
+ return !filter(parent);
+ });
+ }
+
+ depthHistogram() {
+ return this.values.histogram(each => each.depth);
+ }
+
+ fanOutHistogram() {
+ return this.values.histogram(each => each.children.length);
+ }
+
+ forEach(fn) {
+ return this.values.forEach(fn)
+ }
+}
+
+
+// ===========================================================================
+class Chunk {
+ constructor(index, start, end, items, markers) {
+ this.index = index;
+ this.start = start;
+ this.end = end;
+ this.items = items;
+    this.markers = markers;
+ this.height = 0;
+ }
+
+ isEmpty() {
+ return this.items.length == 0;
+ }
+
+ last() {
+ return this.at(this.size() - 1);
+ }
+
+ first() {
+ return this.at(0);
+ }
+
+ at(index) {
+ return this.items[index];
+ }
+
+ size() {
+ return this.items.length;
+ }
+
+ yOffset(map) {
+ // items[0] == oldest map, displayed at the top of the chunk
+ // items[n-1] == youngest map, displayed at the bottom of the chunk
+ return (1 - (this.indexOf(map) + 0.5) / this.size()) * this.height;
+ }
+
+ indexOf(map) {
+ return this.items.indexOf(map);
+ }
+
+ has(map) {
+ if (this.isEmpty()) return false;
+ return this.first().time <= map.time && map.time <= this.last().time;
+ }
+
+ next(chunks) {
+ return this.findChunk(chunks, 1);
+ }
+
+ prev(chunks) {
+ return this.findChunk(chunks, -1);
+ }
+
+ findChunk(chunks, delta) {
+ let i = this.index + delta;
+ let chunk = chunks[i];
+ while (chunk && chunk.size() == 0) {
+ i += delta;
+ chunk = chunks[i]
+ }
+ return chunk;
+ }
+
+ getTransitionBreakdown() {
+ return BreakDown(this.items, map => map.getType())
+ }
+
+ getUniqueTransitions() {
+ // Filter out all the maps that have parents within the same chunk.
+ return this.items.filter(map => !map.parent() || !this.has(map.parent()));
+ }
+}
+
+
+// ===========================================================================
+function BreakDown(list, map_fn) {
+ if (map_fn === void 0) {
+ map_fn = each => each;
+ }
+ let breakdown = {__proto__:null};
+ list.forEach(each=> {
+ let type = map_fn(each);
+ let v = breakdown[type];
+ breakdown[type] = (v | 0) + 1
+ });
+ return Object.entries(breakdown)
+ .sort((a,b) => a[1] - b[1]);
+}
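+// Example: BreakDown(["a", "b", "a"]) yields [["b", 1], ["a", 2]],
+// i.e. [type, count] pairs sorted by ascending count.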
+
+
+// ===========================================================================
+class ArgumentsProcessor extends BaseArgumentsProcessor {
+ getArgsDispatch() {
+ return {
+ '--range': ['range', 'auto,auto',
+ 'Specify the range limit as [start],[end]'
+ ],
+ '--source-map': ['sourceMap', null,
+ 'Specify the source map that should be used for output'
+ ]
+ };
+ }
+
+ getDefaultResults() {
+ return {
+ logFileName: 'v8.log',
+ range: 'auto,auto',
+ };
+ }
+}
diff --git a/deps/v8/tools/release/backport_node.py b/deps/v8/tools/node/backport_node.py
index 5523525671..50b0b077fa 100755
--- a/deps/v8/tools/release/backport_node.py
+++ b/deps/v8/tools/node/backport_node.py
@@ -27,12 +27,19 @@ import subprocess
import re
import sys
-from common_includes import *
-
TARGET_SUBDIR = os.path.join("deps", "v8")
VERSION_FILE = os.path.join("include", "v8-version.h")
VERSION_PATTERN = r'(?<=#define V8_PATCH_LEVEL )\d+'
+def FileToText(file_name):
+ with open(file_name) as f:
+ return f.read()
+
+def TextToFile(text, file_name):
+ with open(file_name, "w") as f:
+ f.write(text)
+
+
def Clean(options):
print ">> Cleaning target directory."
subprocess.check_call(["git", "clean", "-fd"],
diff --git a/deps/v8/tools/node/build_gn.py b/deps/v8/tools/node/build_gn.py
new file mode 100755
index 0000000000..8ab2a635ea
--- /dev/null
+++ b/deps/v8/tools/node/build_gn.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Use this script to build libv8_monolith.a as a dependency for Node.js.
+Required dependencies can be fetched with fetch_deps.py.
+
+Usage: build_gn.py <Debug/Release> <v8-path> <build-path> [<build-flags>]...
+
+Build flags are passed either as "strings" or numeric values. True/false
+are represented as 1/0, e.g.:
+
+ v8_promise_internal_field_count=2
+ target_cpu="x64"
+ v8_enable_disassembler=0
+"""
+
+import os
+import subprocess
+import sys
+
+import node_common
+
+GN_ARGS = [
+ "v8_monolithic = true",
+ "is_component_build = false",
+ "v8_use_external_startup_data = false",
+ "use_custom_libcxx = false",
+ "use_sysroot = false",
+]
+
+BUILD_SUBDIR = "gn"
+
+# TODO: make this cross-platform.
+GN_SUBDIR = ["buildtools", "linux64", "gn"]
+
+def Build(v8_path, build_path, depot_tools, is_debug, build_flags):
+ print "Setting GN args."
+ lines = []
+ lines.extend(GN_ARGS)
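+  # Translate the simplified flag syntax from the command line (=1/=0,
+  # bare ia32) into proper GN syntax.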
+ for flag in build_flags:
+ flag = flag.replace("=1", "=true")
+ flag = flag.replace("=0", "=false")
+ flag = flag.replace("target_cpu=ia32", "target_cpu=\"x86\"")
+ lines.append(flag)
+ lines.append("is_debug = %s" % ("true" if is_debug else "false"))
+ with open(os.path.join(build_path, "args.gn"), "w") as args_file:
+ args_file.write("\n".join(lines))
+ gn = os.path.join(v8_path, *GN_SUBDIR)
+ subprocess.check_call([gn, "gen", "-C", build_path], cwd=v8_path)
+ ninja = os.path.join(depot_tools, "ninja")
+ print "Building."
+ subprocess.check_call([ninja, "-v", "-C", build_path, "v8_monolith"],
+ cwd=v8_path)
+
+def Main(v8_path, build_path, is_debug, build_flags):
+ # Verify paths.
+ v8_path = os.path.abspath(v8_path)
+ assert os.path.isdir(v8_path)
+ build_path = os.path.abspath(build_path)
+ build_path = os.path.join(build_path, BUILD_SUBDIR)
+ if not os.path.isdir(build_path):
+ os.makedirs(build_path)
+
+ # Check that we have depot tools.
+ depot_tools = node_common.EnsureDepotTools(v8_path, False)
+
+ # Build with GN.
+ Build(v8_path, build_path, depot_tools, is_debug, build_flags)
+
+if __name__ == "__main__":
+ # TODO: use argparse to parse arguments.
+ build_mode = sys.argv[1]
+ v8_path = sys.argv[2]
+ build_path = sys.argv[3]
+ assert build_mode == "Debug" or build_mode == "Release"
+ is_debug = build_mode == "Debug"
+ # TODO: introduce "--" flag for pass-through flags.
+ build_flags = sys.argv[4:]
+ Main(v8_path, build_path, is_debug, build_flags)
diff --git a/deps/v8/tools/node/fetch_deps.py b/deps/v8/tools/node/fetch_deps.py
new file mode 100755
index 0000000000..a3e6d74917
--- /dev/null
+++ b/deps/v8/tools/node/fetch_deps.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Use this script to fetch all dependencies for V8 to run build_gn.py.
+
+Usage: fetch_deps.py <v8-path>
+"""
+
+import os
+import subprocess
+import sys
+
+import node_common
+
+GCLIENT_SOLUTION = [
+ { "name" : "v8",
+ "url" : "https://chromium.googlesource.com/v8/v8.git",
+ "deps_file" : "DEPS",
+ "managed" : False,
+ "custom_deps" : {
+ # These deps are already part of Node.js.
+ "v8/base/trace_event/common" : None,
+ "v8/testing/gtest" : None,
+ "v8/third_party/jinja2" : None,
+ "v8/third_party/markupsafe" : None,
+ # These deps are unnecessary for building.
+ "v8/test/benchmarks/data" : None,
+ "v8/testing/gmock" : None,
+ "v8/test/mozilla/data" : None,
+ "v8/test/test262/data" : None,
+ "v8/test/test262/harness" : None,
+ "v8/test/wasm-js" : None,
+ "v8/third_party/android_tools" : None,
+ "v8/third_party/catapult" : None,
+ "v8/third_party/colorama/src" : None,
+ "v8/third_party/instrumented_libraries" : None,
+ "v8/tools/gyp" : None,
+ "v8/tools/luci-go" : None,
+ "v8/tools/swarming_client" : None,
+ },
+ "custom_vars": {
+ "build_for_node" : True,
+ },
+ },
+]
+
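+# Returns True iff a throw-away git repository had to be created so that
+# gclient can run; the caller removes it again via node_common.UninitGit.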
+def EnsureGit(v8_path):
+ expected_git_dir = os.path.join(v8_path, ".git")
+ actual_git_dir = subprocess.check_output(
+ ["git", "rev-parse", "--absolute-git-dir"], cwd=v8_path).strip()
+ if expected_git_dir == actual_git_dir:
+ print "V8 is tracked stand-alone by git."
+ return False
+ print "Initializing temporary git repository in v8."
+ subprocess.check_call(["git", "init"], cwd=v8_path)
+ subprocess.check_call(["git", "commit", "--allow-empty", "-m", "init"],
+ cwd=v8_path)
+ return True
+
+def FetchDeps(v8_path):
+ # Verify path.
+ v8_path = os.path.abspath(v8_path)
+ assert os.path.isdir(v8_path)
+
+ # Check out depot_tools if necessary.
+ depot_tools = node_common.EnsureDepotTools(v8_path, True)
+
+ temporary_git = EnsureGit(v8_path)
+ try:
+ print "Fetching dependencies."
+ env = os.environ.copy()
+ # gclient needs to have depot_tools in the PATH.
+ env["PATH"] = depot_tools + os.pathsep + env["PATH"]
+ spec = "solutions = %s" % GCLIENT_SOLUTION
+ subprocess.check_call(["gclient", "sync", "--spec", spec],
+ cwd=os.path.join(v8_path, os.path.pardir),
+ env=env)
+  finally:
+ if temporary_git:
+ node_common.UninitGit(v8_path)
+ # Clean up .gclient_entries file.
+ gclient_entries = os.path.normpath(
+ os.path.join(v8_path, os.pardir, ".gclient_entries"))
+ if os.path.isfile(gclient_entries):
+ os.remove(gclient_entries)
+ # Enable building with GN for configure script.
+ return True
+
+
+if __name__ == "__main__":
+ FetchDeps(sys.argv[1])
diff --git a/deps/v8/tools/node/node_common.py b/deps/v8/tools/node/node_common.py
new file mode 100755
index 0000000000..f7ca3a6a79
--- /dev/null
+++ b/deps/v8/tools/node/node_common.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import shutil
+import stat
+import subprocess
+
+DEPOT_TOOLS_URL = \
+ "https://chromium.googlesource.com/chromium/tools/depot_tools.git"
+
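+# Looks for a _depot_tools checkout inside the V8 checkout; if it is missing
+# and fetch_if_not_exist is set, clones depot_tools there.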
+def EnsureDepotTools(v8_path, fetch_if_not_exist):
+ def _Get(v8_path):
+ depot_tools = os.path.join(v8_path, "_depot_tools")
+ try:
+ gclient_path = os.path.join(depot_tools, "gclient")
+ gclient_check = subprocess.check_output([gclient_path, "--version"])
+ if "gclient.py" in gclient_check:
+ return depot_tools
+ except:
+ pass
+ if fetch_if_not_exist:
+ print "Checking out depot_tools."
+ subprocess.check_call(["git", "clone", DEPOT_TOOLS_URL, depot_tools])
+ return depot_tools
+ return None
+ depot_tools = _Get(v8_path)
+ assert depot_tools is not None
+ print "Using depot tools in %s" % depot_tools
+ return depot_tools
+
+def UninitGit(v8_path):
+ print "Uninitializing temporary git repository"
+ target = os.path.join(v8_path, ".git")
+ if os.path.isdir(target):
+ print ">> Cleaning up %s" % target
+ def OnRmError(func, path, exec_info):
+ # This might happen on Windows
+ os.chmod(path, stat.S_IWRITE)
+ os.unlink(path)
+ shutil.rmtree(target, onerror=OnRmError)
diff --git a/deps/v8/tools/release/test_backport_node.py b/deps/v8/tools/node/test_backport_node.py
index f9c015baf1..3c61a402c4 100755
--- a/deps/v8/tools/release/test_backport_node.py
+++ b/deps/v8/tools/node/test_backport_node.py
@@ -10,7 +10,6 @@ import sys
import tempfile
import unittest
-from common_includes import FileToText
import backport_node
# Base paths.
@@ -65,7 +64,7 @@ class TestUpdateNode(unittest.TestCase):
# Check version.
version_file = os.path.join(node_cwd, "deps", "v8", "include", "v8-version.h")
- self.assertIn('#define V8_PATCH_LEVEL 4322', FileToText(version_file))
+ self.assertIn('#define V8_PATCH_LEVEL 4322', backport_node.FileToText(version_file))
if __name__ == "__main__":
unittest.main()
diff --git a/deps/v8/tools/release/test_update_node.py b/deps/v8/tools/node/test_update_node.py
index bff3d08c2f..1a29b4ea61 100755
--- a/deps/v8/tools/release/test_update_node.py
+++ b/deps/v8/tools/node/test_update_node.py
@@ -32,6 +32,7 @@ EXPECTED_GITIGNORE = """
EXPECTED_GIT_DIFF = """
create mode 100644 deps/v8/base/trace_event/common/common
rename deps/v8/baz/{delete_me => v8_new} (100%)
+ delete mode 100644 deps/v8/include/v8-version.h
rename deps/v8/{delete_me => new/v8_new} (100%)
create mode 100644 deps/v8/third_party/jinja2/jinja2
create mode 100644 deps/v8/third_party/markupsafe/markupsafe
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/.gitignore b/deps/v8/tools/node/testdata/node/deps/v8/.gitignore
index 23c2024827..23c2024827 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/.gitignore
+++ b/deps/v8/tools/node/testdata/node/deps/v8/.gitignore
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me b/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me
+++ b/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo b/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo
+++ b/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/delete_me b/deps/v8/tools/node/testdata/node/deps/v8/delete_me
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/delete_me
+++ b/deps/v8/tools/node/testdata/node/deps/v8/delete_me
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h b/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h
index fe8b2712e3..fe8b2712e3 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h
+++ b/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/v8_foo b/deps/v8/tools/node/testdata/node/deps/v8/v8_foo
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/v8_foo
+++ b/deps/v8/tools/node/testdata/node/deps/v8/v8_foo
diff --git a/deps/v8/tools/release/testdata/v8/.gitignore b/deps/v8/tools/node/testdata/v8/.gitignore
index 855286229f..855286229f 100644
--- a/deps/v8/tools/release/testdata/v8/.gitignore
+++ b/deps/v8/tools/node/testdata/v8/.gitignore
diff --git a/deps/v8/tools/release/testdata/v8/base/trace_event/common/common b/deps/v8/tools/node/testdata/v8/base/trace_event/common/common
index e69de29bb2..e69de29bb2 100644
--- a/deps/v8/tools/release/testdata/v8/base/trace_event/common/common
+++ b/deps/v8/tools/node/testdata/v8/base/trace_event/common/common
diff --git a/deps/v8/tools/release/testdata/v8/baz/v8_foo b/deps/v8/tools/node/testdata/v8/baz/v8_foo
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/baz/v8_foo
+++ b/deps/v8/tools/node/testdata/v8/baz/v8_foo
diff --git a/deps/v8/tools/release/testdata/v8/baz/v8_new b/deps/v8/tools/node/testdata/v8/baz/v8_new
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/baz/v8_new
+++ b/deps/v8/tools/node/testdata/v8/baz/v8_new
diff --git a/deps/v8/tools/release/testdata/v8/new/v8_new b/deps/v8/tools/node/testdata/v8/new/v8_new
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/new/v8_new
+++ b/deps/v8/tools/node/testdata/v8/new/v8_new
diff --git a/deps/v8/tools/release/testdata/v8/v8_foo b/deps/v8/tools/node/testdata/v8/v8_foo
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/v8_foo
+++ b/deps/v8/tools/node/testdata/v8/v8_foo
diff --git a/deps/v8/tools/release/testdata/v8/v8_new b/deps/v8/tools/node/testdata/v8/v8_new
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/v8_new
+++ b/deps/v8/tools/node/testdata/v8/v8_new
diff --git a/deps/v8/tools/node/update_node.py b/deps/v8/tools/node/update_node.py
new file mode 100755
index 0000000000..ebd953a903
--- /dev/null
+++ b/deps/v8/tools/node/update_node.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Use this script to update V8 in a Node.js checkout.
+
+Requirements:
+ - Node.js checkout in which V8 should be updated.
+ - V8 checkout at the commit to which Node.js should be updated.
+
+Usage:
+ $ update_node.py <path_to_v8> <path_to_node>
+
+ This will synchronize the content of <path_to_node>/deps/v8 with <path_to_v8>,
+ and the few V8 dependencies required in Node.js. It will also update .gitignore
+ appropriately.
+
+Optional flags:
+ --gclient Run `gclient sync` on the V8 checkout before updating.
+ --commit Create commit with the updated V8 in the Node.js checkout.
+ --with-patch Also include currently staged files in the V8 checkout.
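+
+Example invocation (paths are placeholders):
+ $ update_node.py /path/to/v8 /path/to/node --gclient --commit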
+"""
+
+import argparse
+import os
+import shutil
+import subprocess
+import sys
+import stat
+import node_common
+
+TARGET_SUBDIR = os.path.join("deps", "v8")
+
+SUB_REPOSITORIES = [ ["base", "trace_event", "common"],
+ ["testing", "gtest"],
+ ["third_party", "jinja2"],
+ ["third_party", "markupsafe"] ]
+
+DELETE_FROM_GITIGNORE = [ "/base",
+ "/testing/gtest",
+ "/third_party/jinja2",
+ "/third_party/markupsafe" ]
+
+# Node.js requires only a single header file from gtest to build V8.
+# Both jinja2 and markupsafe are required to generate part of the inspector.
+ADD_TO_GITIGNORE = [ "/testing/gtest/*",
+ "!/testing/gtest/include",
+ "/testing/gtest/include/*",
+ "!/testing/gtest/include/gtest",
+ "/testing/gtest/include/gtest/*",
+ "!/testing/gtest/include/gtest/gtest_prod.h",
+ "!/third_party/jinja2",
+ "!/third_party/markupsafe" ]
+
+def RunGclient(path):
+ assert os.path.isdir(path)
+ print ">> Running gclient sync"
+ subprocess.check_call(["gclient", "sync", "--nohooks"], cwd=path)
+
+def CommitPatch(options):
+ """Makes a dummy commit for the changes in the index.
+
+ On trybots, bot_update applies the patch to the index. We commit it to make
+ the fake git clone fetch it into Node.js. We can leave the commit, as
+ bot_update will ensure a clean state on each run.
+ """
+ print ">> Committing patch"
+ subprocess.check_call(
+ ["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
+ "commit", "--allow-empty", "-m", "placeholder-commit"],
+ cwd=options.v8_path,
+ )
+
+def UpdateTarget(repository, options):
+ source = os.path.join(options.v8_path, *repository)
+ target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
+ print ">> Updating target directory %s" % target
+ print ">> from active branch at %s" % source
+ if not os.path.exists(target):
+ os.makedirs(target)
+ # Remove possible remnants of previous incomplete runs.
+ node_common.UninitGit(target)
+
+ git_commands = [
+ ["git", "init"], # initialize target repo
+ ["git", "remote", "add", "origin", source], # point to the source repo
+ ["git", "fetch", "origin", "HEAD"], # sync to the current branch
+ ["git", "reset", "--hard", "FETCH_HEAD"], # reset to the current branch
+ ["git", "clean", "-fd"], # delete removed files
+ ]
+ try:
+ for command in git_commands:
+ subprocess.check_call(command, cwd=target)
+ finally:
+ node_common.UninitGit(target)
+
+def UpdateGitIgnore(options):
+ file_name = os.path.join(options.node_path, TARGET_SUBDIR, ".gitignore")
+ assert os.path.isfile(file_name)
+ print ">> Updating .gitignore with lines"
+ with open(file_name) as gitignore:
+ content = gitignore.readlines()
+ content = [x.strip() for x in content]
+ for x in DELETE_FROM_GITIGNORE:
+ if x in content:
+ print "- %s" % x
+ content.remove(x)
+ for x in ADD_TO_GITIGNORE:
+ if x not in content:
+ print "+ %s" % x
+ content.append(x)
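+ # Sort ignoring a leading "!" so that negated patterns sort next to the
+ # patterns they override.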
+ content.sort(key=lambda x: x[1:] if x.startswith("!") else x)
+ with open(file_name, "w") as gitignore:
+ for x in content:
+ gitignore.write("%s\n" % x)
+
+def CreateCommit(options):
+ print ">> Creating commit."
+ # Find git hash from source.
+ githash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"],
+ cwd=options.v8_path).strip()
+ # Create commit at target.
+ git_commands = [
+ ["git", "checkout", "-b", "update_v8_to_%s" % githash], # new branch
+ ["git", "add", "."], # add files
+ ["git", "commit", "-m", "Update V8 to %s" % githash] # new commit
+ ]
+ for command in git_commands:
+ subprocess.check_call(command, cwd=options.node_path)
+
+def ParseOptions(args):
+ parser = argparse.ArgumentParser(description="Update V8 in Node.js")
+ parser.add_argument("v8_path", help="Path to V8 checkout")
+ parser.add_argument("node_path", help="Path to Node.js checkout")
+ parser.add_argument("--gclient", action="store_true", help="Run gclient sync")
+ parser.add_argument("--commit", action="store_true", help="Create commit")
+ parser.add_argument("--with-patch", action="store_true",
+ help="Apply also staged files")
+ options = parser.parse_args(args)
+ assert os.path.isdir(options.v8_path)
+ options.v8_path = os.path.abspath(options.v8_path)
+ assert os.path.isdir(options.node_path)
+ options.node_path = os.path.abspath(options.node_path)
+ return options
+
+def Main(args):
+ options = ParseOptions(args)
+ if options.gclient:
+ RunGclient(options.v8_path)
+ # Commit patch on trybots to main V8 repository.
+ if options.with_patch:
+ CommitPatch(options)
+ # Update main V8 repository.
+ UpdateTarget([""], options)
+ # Patch .gitignore before updating sub-repositories.
+ UpdateGitIgnore(options)
+ for repo in SUB_REPOSITORIES:
+ UpdateTarget(repo, options)
+ if options.commit:
+ CreateCommit(options)
+
+if __name__ == "__main__":
+ Main(sys.argv[1:])
diff --git a/deps/v8/tools/parse-processor b/deps/v8/tools/parse-processor
new file mode 100755
index 0000000000..588f120b4e
--- /dev/null
+++ b/deps/v8/tools/parse-processor
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+# Find the log file to process: the last argument that does not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+ if ! expr "X${arg}" : "^X-" > /dev/null; then
+ log_file=${arg}
+ fi
+done
+
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+ d8_public=`which d8`
+ if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x "$d8_exec" ]; then
+ D8_PATH=`pwd`/out.gn/optdebug
+ d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ echo "d8 shell not found in $D8_PATH"
+ echo "To build, execute 'make native' from the V8 directory"
+ exit 1
+fi
+
+# nm spits out 'no symbols found' messages to stderr.
+cat $log_file | $d8_exec --trace-maps --allow-natives-syntax --trace-deopt $tools_path/splaytree.js $tools_path/codemap.js \
+ $tools_path/csvparser.js $tools_path/consarray.js \
+ $tools_path/profile.js $tools_path/profile_view.js \
+ $tools_path/logreader.js $tools_path/arguments.js \
+ $tools_path/parse-processor.js $tools_path/SourceMap.js \
+ $tools_path/parse-processor-driver.js -- $@ 2>/dev/null
diff --git a/deps/v8/tools/parse-processor-driver.js b/deps/v8/tools/parse-processor-driver.js
new file mode 100644
index 0000000000..f8f0c15254
--- /dev/null
+++ b/deps/v8/tools/parse-processor-driver.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function processArguments(args) {
+ var processor = new ArgumentsProcessor(args);
+ if (processor.parse()) {
+ return processor.result();
+ } else {
+ processor.printUsageAndExit();
+ }
+}
+
+function initSourceMapSupport() {
+ // Pull dev tools source maps into our name space.
+ SourceMap = WebInspector.SourceMap;
+
+ // Overwrite the load function to load scripts synchronously.
+ SourceMap.load = function(sourceMapURL) {
+ var content = readFile(sourceMapURL);
+ var sourceMapObject = (JSON.parse(content));
+ return new SourceMap(sourceMapURL, sourceMapObject);
+ };
+}
+
+var params = processArguments(arguments);
+var sourceMap = null;
+if (params.sourceMap) {
+ initSourceMapSupport();
+ sourceMap = SourceMap.load(params.sourceMap);
+}
+var parseProcessor = new ParseProcessor();
+parseProcessor.processLogFile(params.logFileName);
diff --git a/deps/v8/tools/parse-processor.html b/deps/v8/tools/parse-processor.html
new file mode 100644
index 0000000000..e41fffbd5f
--- /dev/null
+++ b/deps/v8/tools/parse-processor.html
@@ -0,0 +1,337 @@
+<html>
+<!--
+Copyright 2016 the V8 project authors. All rights reserved. Use of this source
+code is governed by a BSD-style license that can be found in the LICENSE file.
+-->
+
+<head>
+<style>
+ html {
+ font-family: monospace;
+ }
+
+ .parse {
+ background-color: red;
+ border: 1px red solid;
+ }
+
+ .preparse {
+ background-color: orange;
+ border: 1px orange solid;
+ }
+
+ .resolution {
+ background-color: green;
+ border: 1px green solid;
+ }
+
+ .execution {
+ background-color: black;
+ border-left: 2px black solid;
+ z-index: -1;
+ }
+
+ .script {
+ margin-top: 1em;
+ overflow: visible;
+ clear: both;
+ border-top: 2px black dotted;
+ }
+ .script h3 {
+ height: 20px;
+ margin-bottom: 0.5em;
+ white-space: nowrap;
+ }
+
+ .script-details {
+ float: left;
+ }
+
+ .chart {
+ float: left;
+ margin-right: 2em;
+ }
+
+ .funktion-list {
+ float: left;
+ height: 400px;
+ }
+
+ .funktion-list > ul {
+ height: 80%;
+ overflow-y: scroll;
+ }
+
+
+ .funktion {
+ }
+
+</style>
+<script src="./splaytree.js" type="text/javascript"></script>
+<script src="./codemap.js" type="text/javascript"></script>
+<script src="./csvparser.js" type="text/javascript"></script>
+<script src="./consarray.js" type="text/javascript"></script>
+<script src="./profile.js" type="text/javascript"></script>
+<script src="./profile_view.js" type="text/javascript"></script>
+<script src="./logreader.js" type="text/javascript"></script>
+<script src="./arguments.js" type="text/javascript"></script>
+<script src="./parse-processor.js" type="text/javascript"></script>
+<script src="./SourceMap.js" type="text/javascript"></script>
+<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+<script type="text/javascript">
+"use strict";
+google.charts.load('current', {packages: ['corechart']});
+
+function $(query) {
+ return document.querySelector(query);
+}
+
+
+function loadFile() {
+ let files = $('#uploadInput').files;
+
+ let file = files[0];
+ let reader = new FileReader();
+
+ reader.onload = function(evt) {
+ const kTimerName = 'parse log file';
+ console.time(kTimerName);
+ let parseProcessor = new ParseProcessor();
+ parseProcessor.processString(this.result);
+ console.timeEnd(kTimerName);
+ renderParseResults(parseProcessor);
+ document.parseProcessor = parseProcessor;
+ }
+ reader.readAsText(file);
+}
+
+function handleOnLoad() {
+ document.querySelector("#uploadInput").focus();
+}
+
+function createNode(tag, classNames) {
+ let node = document.createElement(tag);
+ if (classNames) {
+ if (Array.isArray(classNames)) {
+ node.classList.add(...classNames);
+ } else {
+ node.className = classNames;
+ }
+ }
+ return node;
+}
+
+function div(...args) {
+ return createNode('div', ...args);
+}
+
+function h1(string) {
+ let node = createNode('h1');
+ node.appendChild(text(string));
+ return node;
+}
+
+function h3(string, ...args) {
+ let node = createNode('h3', ...args);
+ if (string) node.appendChild(text(string));
+ return node;
+}
+
+function a(href, string, ...args) {
+ let link = createNode('a', ...args);
+ if (href.length) link.href = href;
+ if (string) link.appendChild(text(string));
+ return link;
+}
+
+function text(string) {
+ return document.createTextNode(string);
+}
+
+function delay(t) {
+ return new Promise(resolve => setTimeout(resolve, t));
+}
+
+function renderParseResults(parseProcessor) {
+ let result = $('#result');
+ // clear out all existing result pages;
+ result.innerHTML = '';
+ const start = parseProcessor.firstEvent;
+ const end = parseProcessor.lastEvent;
+ renderScript(result, parseProcessor.totalScript, start, end);
+ // Build up the graphs lazily to keep the page responsive.
+ parseProcessor.scripts.forEach(
+ script => renderScript(result, script, start, end));
+ // Install an intersection observer to lazily load the graphs when the script
+ // div becomes visible for the first time.
+ var io = new IntersectionObserver((entries, observer) => {
+ entries.forEach(entry => {
+ if (entry.intersectionRatio == 0) return;
+ console.assert(!entry.target.querySelector('.graph'));
+ let target = entry.target;
+ appendGraph(target.script, target, start, end);
+ observer.unobserve(entry.target);
+ });
+ }, {});
+ document.querySelectorAll('.script').forEach(div => io.observe(div));
+}
+
+
+const kTimeFactor = 10;
+const kHeight = 20;
+const kFunktionTopOffset = 50;
+
+function renderScript(result, script, start, end) {
+ // Filter out empty scripts.
+ if (script.isEmpty() || script.lastParseEvent == 0) return;
+
+ let scriptDiv = div('script');
+ scriptDiv.script = script;
+
+ let scriptTitle = h3();
+ if (script.file) scriptTitle.appendChild(a(script.file, script.file));
+ let anchor = a("", ' id=' + script.id);
+ anchor.name = "script" + script.id;
+ scriptTitle.appendChild(anchor);
+ scriptDiv.appendChild(scriptTitle);
+ let summary = createNode('pre', 'script-details');
+ summary.appendChild(text(script.summary));
+ scriptDiv.appendChild(summary);
+ result.appendChild(scriptDiv);
+ return scriptDiv;
+}
+
+const kMaxTime = 120 * kSecondsToMillis;
+// Resolution of the graphs
+const kTimeIncrement = 1;
+const kSelectionTimespan = 2;
+const series = [
+// ['firstParseEvent', 'Any Parse Event'],
+ ['parse', 'Parsing'],
+// ['preparse', 'Preparsing'],
+// ['resolution', 'Preparsing with Var. Resolution'],
+ ['lazyCompile', 'Lazy Compilation'],
+ ['compile', 'Eager Compilation'],
+ ['execution', 'First Execution'],
+];
+const metricNames = series.map(each => each[0]);
+
+
+function appendGraph(script, parentNode, start, end) {
+ const timerLabel = 'graph script=' + script.id;
+ // TODO(cbruni): add support for network events
+
+ console.time(timerLabel);
+ let data = new google.visualization.DataTable();
+ data.addColumn('number', 'Time');
+ // The series interleave bytes processed and time spent, and thus use two
+ // different vAxes.
+ let seriesOptions = [];
+ series.forEach(each => {
+ let description = each[1];
+ // Add the bytes column.
+ data.addColumn('number', description + ' Bytes');
+ seriesOptions.push({targetAxisIndex: 0});
+ // Add the time column.
+ data.addColumn('number', description + ' Time');
+ seriesOptions.push({targetAxisIndex: 1, lineDashStyle: [3, 2]});
+ });
+ // The first entry contains the total.
+ seriesOptions[0].type = 'area';
+
+ const maxTime = Math.min(kMaxTime, end);
+ console.time('metrics');
+ let metricValues =
+ script.getAccumulatedTimeMetrics(metricNames, 0, maxTime, kTimeIncrement);
+ console.timeEnd('metrics');
+ console.assert(metricValues[0].length == seriesOptions.length + 1);
+ data.addRows(metricValues);
+
+ let options = {
+ explorer: {
+ actions: ['dragToZoom', 'rightClickToReset'],
+ maxZoomIn: 0.01
+ },
+ hAxis: {
+ format: '#,###.##s'
+ },
+ vAxes: {
+ 0: {title: 'Bytes Touched', format: 'short'},
+ 1: {title: 'Time', format: '#,###ms'}
+ },
+ height: 400,
+ width: 1000,
+ chartArea: {left: '5%', top: '15%', width: "85%", height: "75%"},
+ // The first series should be an area chart (total bytes touched),
+ series: seriesOptions,
+ // everything else is a line.
+ seriesType: 'line'
+ };
+ let graphNode = createNode('div', 'chart');
+ let listNode = createNode('div', 'funktion-list');
+ parentNode.appendChild(graphNode);
+ parentNode.appendChild(listNode);
+ let chart = new google.visualization.ComboChart(graphNode);
+ google.visualization.events.addListener(chart, 'select',
+ () => selectGraphPointHandler(chart, data, script, parentNode));
+ chart.draw(data, options);
+ console.timeEnd(timerLabel);
+}
+
+
+function selectGraphPointHandler(chart, data, script, parentNode) {
+ let selection = chart.getSelection();
+ if (selection.length <= 0) return;
+ // Display a list of funktions with events at the given time.
+ let {row, column} = selection[0];
+ if (row === null|| column === null) return;
+ let name = series[((column-1)/2) | 0][0];
+ let time = data.getValue(row, 0);
+ let funktions = script.getFunktionsAtTime(
+ time * kSecondsToMillis, kSelectionTimespan, name);
+ let oldList = parentNode.querySelector('.funktion-list');
+ parentNode.replaceChild(createFunktionList(name, time, funktions), oldList);
+}
+
+function createFunktionList(metric, time, funktions) {
+ let container = createNode('div', 'funktion-list');
+ container.appendChild(h3('Changes of ' + metric + ' at ' +
+ time + 's: ' + funktions.length));
+ let listNode = createNode('ul');
+ funktions.forEach(funktion => {
+ let node = createNode('li', 'funktion');
+ node.funktion = funktion;
+ node.appendChild(text(funktion.toString(false) + " "));
+ let script = funktion.script;
+ if (script) {
+ node.appendChild(a("#script" + script.id, "in script " + script.id));
+ }
+ listNode.appendChild(node);
+ });
+ container.appendChild(listNode);
+ return container;
+}
+
+
+</script>
+</head>
+
+<body onload="handleOnLoad()">
+ <h1>BEHOLD, THIS IS PARSEROR!</h1>
+
+ <h2>Usage</h2>
+ Run your script with <code>--log-function-events</code> and upload <code>v8.log</code> on this page:<br/>
+ <code>/path/to/d8 --log-function-events your_script.js</code>
+
+ <h2>Data</h2>
+ <form name="fileForm">
+ <p>
+ <input id="uploadInput" type="file" name="files" onchange="loadFile();"> trace entries: <span id="count">0</span>
+ </p>
+ </form>
+
+ <h2>Result</h2>
+ <div id="result"></div>
+</body>
+
+</html>
diff --git a/deps/v8/tools/parse-processor.js b/deps/v8/tools/parse-processor.js
new file mode 100644
index 0000000000..30b593a156
--- /dev/null
+++ b/deps/v8/tools/parse-processor.js
@@ -0,0 +1,918 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+"use strict";
+
+
+/**
+ * A thin wrapper around shell's 'read' function showing a file name on error.
+ */
+function readFile(fileName) {
+ try {
+ return read(fileName);
+ } catch (e) {
+ console.log(fileName + ': ' + (e.message || e));
+ throw e;
+ }
+}
+
+// ===========================================================================
+
+// Use a fixed locale for all number formatting: for an international audience
+// the decimal and thousands separators are easily confused (comma "," vs.
+// dot "."). The Swiss format uses "'" as the thousands separator, which
+// avoids most of that confusion.
+var numberFormat = new Intl.NumberFormat('de-CH', {
+ maximumFractionDigits: 2,
+ minimumFractionDigits: 2,
+});
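+// For example, 1234567.891 formats as roughly 1'234'567.89 (the exact
+// grouping character depends on the host's ICU data).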
+
+function formatNumber(value) {
+ return numberFormat.format(value);
+}
+
+function BYTES(bytes, total) {
+ let units = ['B ', 'kB', 'mB', 'gB'];
+ let unitIndex = 0;
+ let value = bytes;
+ while (value > 1000 && unitIndex < units.length - 1) {
+ value /= 1000;
+ unitIndex++;
+ }
+ let result = formatNumber(value).padStart(10) + ' ' + units[unitIndex];
+ if (total !== void 0 && total != 0) {
+ result += PERCENT(bytes, total).padStart(5);
+ }
+ return result;
+}
+
+function PERCENT(value, total) {
+ return Math.round(value / total * 100) + "%";
+}
+
+function timestampMin(list) {
+ let result = -1;
+ list.forEach(timestamp => {
+ if (result === -1) {
+ result = timestamp;
+ } else if (timestamp != -1) {
+ result = Math.min(result, timestamp);
+ }
+ });
+ return Math.round(result);
+}
+
+
+// ===========================================================================
+class Script {
+ constructor(file, id) {
+ this.file = file;
+ this.isNative = false;
+ this.id = id;
+ if (id === void 0 || id <= 0) {
+ throw new Error(`Invalid id=${id} for script with file='${file}'`);
+ }
+ this.isEval = false;
+ this.funktions = [];
+ this.metrics = new Map();
+ this.maxNestingLevel = 0;
+
+ this.firstEvent = -1;
+ this.firstParseEvent = -1;
+ this.lastParseEvent = -1;
+ this.executionTimestamp = -1;
+ this.compileTimestamp = -1;
+ this.lastEvent = -1;
+
+ this.compileTime = -0.0;
+
+ this.width = 0;
+ this.bytesTotal = 0;
+ this.ownBytes = -1;
+ this.finalized = false;
+ this.summary = '';
+ this.setFile(file);
+ }
+
+ setFile(name) {
+ this.file = name;
+ this.isNative = name.startsWith('native ');
+ }
+
+ isEmpty() {
+ return this.funktions.length === 0
+ }
+
+ funktionAtPosition(start) {
+ if (start === 0) throw "position 0 is reserved for the script";
+ if (this.finalized) throw 'Finalized script has no source position!';
+ return this.funktions[start];
+ }
+
+ addMissingFunktions(list) {
+ if (this.finalized) throw 'script is finalized!';
+ list.forEach(fn => {
+ if (this.funktions[fn.start] === void 0) {
+ this.addFunktion(fn);
+ }
+ });
+ }
+
+ addFunktion(fn) {
+ if (this.finalized) throw 'script is finalized!';
+ if (fn.start === void 0) throw "Funktion has no start position";
+ if (this.funktions[fn.start] !== void 0) {
+ fn.print();
+ throw "adding same function twice to script";
+ }
+ this.funktions[fn.start] = fn;
+ }
+
+ finalize() {
+ this.finalized = true;
+ // Compact funktions as we no longer need access via start byte position.
+ this.funktions = this.funktions.filter(each => true);
+ let parent = null;
+ let maxNesting = 0;
+ // Iterate over the Funktions in byte position order.
+ this.funktions.forEach(fn => {
+ fn.fromEval = this.isEval;
+ if (parent === null) {
+ parent = fn;
+ } else {
+ // Walk up the nested chain of Funktions to find the parent.
+ while (parent !== null && !fn.isNestedIn(parent)) {
+ parent = parent.parent;
+ }
+ fn.parent = parent;
+ if (parent) {
+ maxNesting = Math.max(maxNesting, parent.addNestedFunktion(fn));
+ }
+ parent = fn;
+ }
+ this.firstParseEvent = this.firstParseEvent === -1 ?
+ fn.getFirstParseEvent() :
+ Math.min(this.firstParseEvent, fn.getFirstParseEvent());
+ this.lastParseEvent =
+ Math.max(this.lastParseEvent, fn.getLastParseEvent());
+ fn.getFirstEvent();
+ if (Number.isNaN(this.lastEvent)) throw "Invalid lastEvent";
+ this.lastEvent = Math.max(this.lastEvent, fn.getLastEvent());
+ if (Number.isNaN(this.lastEvent)) throw "Invalid lastEvent";
+ });
+ this.maxNestingLevel = maxNesting;
+ this.getFirstEvent();
+ }
+
+ print() {
+ console.log(this.toString());
+ }
+
+ toString() {
+ let str = `SCRIPT id=${this.id} file=${this.file}\n` +
+ `functions[${this.funktions.length}]:`;
+ this.funktions.forEach(fn => str += fn.toString());
+ return str;
+ }
+
+ getBytes() {
+ return this.bytesTotal;
+ }
+
+ getOwnBytes() {
+ if (this.ownBytes === -1) {
+ this.ownBytes = this.funktions.reduce(
+ (bytes, each) => bytes - (each.parent == null ? each.getBytes() : 0),
+ this.getBytes());
+ if (this.ownBytes < 0) throw "Own bytes must be positive";
+ }
+ return this.ownBytes;
+ }
+
+ // Also see Funktion.prototype.getMetricBytes
+ getMetricBytes(name) {
+ if (name == 'lazyCompileTimestamp') return this.getOwnBytes();
+ return this.getBytes();
+ }
+
+ getMetricTime(name) {
+ return this[name];
+ }
+
+ forEach(fn) {
+ fn(this);
+ this.funktions.forEach(fn);
+ }
+
+ // Container helper for TotalScript / Script.
+ getScripts() {
+ return [this];
+ }
+
+ calculateMetrics(printSummary) {
+ let log = (str) => this.summary += str + '\n';
+ log("SCRIPT: " + this.id);
+ let all = this.funktions;
+ if (all.length === 0) return;
+
+ let nofFunktions = all.length;
+ let ownBytesSum = list => {
+ return list.reduce((bytes, each) => bytes + each.getOwnBytes(), 0)
+ };
+
+ let info = (name, funktions) => {
+ let ownBytes = ownBytesSum(funktions);
+ let nofPercent = Math.round(funktions.length / nofFunktions * 100);
+ let value = (funktions.length + "").padStart(6) +
+ (nofPercent + "%").padStart(5) +
+ BYTES(ownBytes, this.bytesTotal).padStart(10);
+ log((" - " + name).padEnd(20) + value);
+ this.metrics.set(name + "-bytes", ownBytes);
+ this.metrics.set(name + "-count", funktions.length);
+ this.metrics.set(name + "-count-percent", nofPercent);
+ this.metrics.set(name + "-bytes-percent",
+ Math.round(ownBytes / this.bytesTotal * 100));
+ };
+
+ log(" - file: " + this.file);
+ info("scripts", this.getScripts());
+ info("functions", all);
+ info("toplevel fn", all.filter(each => each.isToplevel()));
+ info("preparsed", all.filter(each => each.preparseTime > 0));
+
+
+ info("fully parsed", all.filter(each => each.parseTime > 0));
+ // info("fn parsed", all.filter(each => each.parse2Time > 0));
+ // info("resolved", all.filter(each => each.resolutionTime > 0));
+ info("executed", all.filter(each => each.executionTimestamp > 0));
+ info("forEval", all.filter(each => each.fromEval));
+ info("lazy compiled", all.filter(each => each.lazyCompileTimestamp > 0));
+ info("eager compiled", all.filter(each => each.compileTimestamp > 0));
+
+ let parsingCost = new ExecutionCost('parse', all,
+ each => each.parseTime);
+ parsingCost.setMetrics(this.metrics);
+ log(parsingCost.toString())
+
+ let preParsingCost = new ExecutionCost('preparse', all,
+ each => each.preparseTime);
+ preParsingCost.setMetrics(this.metrics);
+ log(preParsingCost.toString())
+
+ let resolutionCost = new ExecutionCost('resolution', all,
+ each => each.resolutionTime);
+ resolutionCost.setMetrics(this.metrics);
+ log(resolutionCost.toString())
+
+ let nesting = new NestingDistribution(all);
+ nesting.setMetrics(this.metrics);
+ log(nesting.toString())
+
+ if (printSummary) console.log(this.summary);
+ }
+
+ getAccumulatedTimeMetrics(metrics, start, end, delta, incremental = false) {
+ // Returns an array of the following format:
+ // [ [start, acc(metric0, start, start), acc(metric1, ...), ...],
+ // [start+delta, acc(metric0, start, start+delta), ...],
+ // [start+delta*2, acc(metric0, start, start+delta*2), ...],
+ // ...
+ // ]
+ const timespan = end - start;
+ const kSteps = Math.ceil(timespan / delta);
+ // To reduce the time spent iterating over the funktions of this script
+ // we iterate once over all funktions and add the metric changes to each
+ // timepoint:
+ // [ [0, 300, ...], [1, 15, ...], [2, 100, ...], [3, 0, ...] ... ]
+ // In a second step we accumulate all values:
+ // [ [0, 300, ...], [1, 315, ...], [2, 415, ...], [3, 415, ...] ... ]
+ //
+ // To limit the number of data points required in the resulting graphs,
+ // only the rows for entries with actual changes are created.
+
+ const metricProperties = ["time"];
+ metrics.forEach(each => {
+ metricProperties.push(each + 'Timestamp');
+ metricProperties.push(each + 'Time');
+ });
+ // Create a packed {rowTemplate} which is copied later-on.
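+ // indexToTime maps a bucket index back to seconds on the x-axis.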
+ let indexToTime = (t) => (start + t * delta) / kSecondsToMillis;
+ let rowTemplate = [indexToTime(0)];
+ for (let i = 1; i < metricProperties.length; i++) rowTemplate.push(0.0);
+ // Create rows with 0-time entry.
+ let rows = new Array(rowTemplate.slice());
+ for (let t = 1; t <= kSteps; t++) rows.push(null);
+ // Create the real metric's property name on the Funktion object.
+ // Add the increments of each Funktion's metric to the result.
+ this.forEach(funktionOrScript => {
+ // Iterate over the Funktion's metric names, position 0 is the time.
+ for (let i = 1; i < metricProperties.length; i += 2) {
+ let property = metricProperties[i];
+ let timestamp = funktionOrScript[property];
+ if (timestamp === void 0) continue;
+ if (timestamp < 0 || end < timestamp) continue;
+ let index = Math.floor(timestamp / delta);
+ let row = rows[index];
+ if (row === null) {
+ // Add a new row if it didn't exist,
+ row = rows[index] = rowTemplate.slice();
+ // .. add the time offset.
+ row[0] = indexToTime(index);
+ }
+ // Add the metric value.
+ row[i] += funktionOrScript.getMetricBytes(property);
+ let timeMetricName = metricProperties[i + 1];
+ row[i + 1] += funktionOrScript.getMetricTime(timeMetricName);
+ }
+ });
+ // Create a packed array again with only the valid entries.
+ // Accumulate the incremental results by adding the metric values from
+ // the previous time window.
+ let previous = rows[0];
+ let result = [previous];
+ for (let t = 1; t < rows.length; t++) {
+ let current = rows[t];
+ if (current === null) {
+ // Ensure a zero data-point after each non-zero point.
+ if (incremental && rows[t - 1] !== null) {
+ let duplicate = rowTemplate.slice();
+ duplicate[0] = indexToTime(t);
+ result.push(duplicate);
+ }
+ continue;
+ }
+ if (!incremental) {
+ // Skip i==0, which holds the time value in seconds.
+ for (let i = 1; i < metricProperties.length; i++) {
+ current[i] += previous[i];
+ }
+ }
+ // Make sure we have a data-point in time right before the current one.
+ if (rows[t - 1] === null) {
+ let duplicate = (incremental ? rowTemplate : previous).slice();
+ duplicate[0] = indexToTime(t - 1);
+ result.push(duplicate);
+ }
+ previous = current;
+ result.push(current);
+ }
+ // Make sure there is an entry at the last position to make sure all graphs
+ // have the same width.
+ const lastIndex = rows.length - 1;
+ if (rows[lastIndex] === null) {
+ let duplicate = previous.slice();
+ duplicate[0] = indexToTime(lastIndex);
+ result.push(duplicate);
+ }
+ return result;
+ }
+
+ getFunktionsAtTime(time, delta, metric) {
+ // Returns a list of Funktions whose metric changed in the
+ // [time-delta, time+delta] range.
+ return this.funktions.filter(
+ funktion => funktion.didMetricChange(time, delta, metric));
+ }
+
+ getFirstEvent() {
+ if (this.firstEvent === -1) {
+ // TODO(cbruni): add support for network request timestamp
+ this.firstEvent = this.firstParseEvent;
+ }
+ return this.firstEvent;
+ }
+}
+
+
+class TotalScript extends Script {
+ constructor() {
+ super('all files', 'all files');
+ this.scripts = [];
+ }
+
+ addAllFunktions(script) {
+ // funktions is indexed by byte offset and as such not packed. Add every
+ // Funktion one by one to keep this.funktions packed.
+ script.funktions.forEach(fn => this.funktions.push(fn));
+ this.scripts.push(script);
+ this.bytesTotal += script.bytesTotal;
+ }
+
+ // Iterate over all Scripts and nested Funktions.
+ forEach(fn) {
+ this.scripts.forEach(script => script.forEach(fn));
+ }
+
+ getScripts() {
+ return this.scripts;
+ }
+}
+
+
+// ===========================================================================
+
+class NestingDistribution {
+ constructor(funktions) {
+ // Stores the nof bytes per function nesting level.
+ this.accumulator = [0, 0, 0, 0, 0];
+ // Max nof bytes encountered at any nesting level.
+ this.max = 0;
+ // avg bytes per nesting level.
+ this.avg = 0;
+ this.totalBytes = 0;
+
+ funktions.forEach(each => each.accumulateNestingLevel(this.accumulator));
+ this.max = this.accumulator.reduce((max, each) => Math.max(max, each), 0);
+ this.totalBytes = this.accumulator.reduce((sum, each) => sum + each, 0);
+ for (let i = 0; i < this.accumulator.length; i++) {
+ this.avg += this.accumulator[i] * i;
+ }
+ this.avg /= this.totalBytes;
+ }
+
+ print() {
+ console.log(this.toString())
+ }
+
+ toString() {
+ let ticks = " ▁▂▃▄▅▆▇█";
+ let accString = this.accumulator.reduce((str, each) => {
+ let index = Math.round(each / this.max * (ticks.length - 1));
+ return str + ticks[index];
+ }, '');
+ let percent0 = this.accumulator[0]
+ let percent1 = this.accumulator[1];
+ let percent2plus = this.accumulator.slice(2)
+ .reduce((sum, each) => sum + each, 0);
+ return " - nesting level: " +
+ ' avg=' + formatNumber(this.avg) +
+ ' l0=' + PERCENT(percent0, this.totalBytes) +
+ ' l1=' + PERCENT(percent1, this.totalBytes) +
+ ' l2+=' + PERCENT(percent2plus, this.totalBytes) +
+ ' distribution=[' + accString + ']';
+
+ }
+
+ setMetrics(dict) {}
+}
+
+class ExecutionCost {
+ constructor(prefix, funktions, time_fn) {
+ this.prefix = prefix;
+ // Time spent on executed functions.
+ this.executedCost = 0
+ // Time spent on not executed functions.
+ this.nonExecutedCost = 0;
+
+ this.executedCost = funktions.reduce((sum, each) => {
+ return sum + (each.hasBeenExecuted() ? time_fn(each) : 0)
+ }, 0);
+ this.nonExecutedCost = funktions.reduce((sum, each) => {
+ return sum + (each.hasBeenExecuted() ? 0 : time_fn(each))
+ }, 0);
+
+ }
+
+ print() {
+ console.log(this.toString())
+ }
+
+ toString() {
+ return (' - ' + this.prefix + '-time:').padEnd(24) +
+ (" executed=" + formatNumber(this.executedCost) + 'ms').padEnd(20) +
+ " non-executed=" + formatNumber(this.nonExecutedCost) + 'ms';
+ }
+
+ setMetrics(dict) {
+ dict.set(this.prefix + 'Metric', this.executedCost);
+ dict.set(this.prefix + 'MetricNegative', this.nonExecutedCost);
+ }
+}
+
+// ===========================================================================
+const kNoTimeMetrics = {
+ __proto__: null,
+ executionTime: 0,
+ firstEventTimestamp: 0,
+ firstParseEventTimestamp: 0,
+ lastParseTimestamp: 0,
+ lastEventTimestamp: 0
+};
+
+class Funktion {
+ constructor(name, start, end, script) {
+ if (start < 0) throw "invalid start position: " + start;
+ if (end <= 0) throw "invalid end position: " + end;
+ if (end <= start) throw "invalid start end positions";
+
+ this.name = name;
+ this.start = start;
+ this.end = end;
+ this.ownBytes = -1;
+ this.script = script;
+ this.parent = null;
+ this.fromEval = false;
+ this.nested = [];
+ this.nestingLevel = 0;
+
+ this.preparseTimestamp = -1;
+ this.parseTimestamp = -1;
+ this.parse2Timestamp = -1;
+ this.resolutionTimestamp = -1;
+ this.lazyCompileTimestamp = -1;
+ this.compileTimestamp = -1;
+ this.executionTimestamp = -1;
+
+ this.preparseTime = -0.0;
+ this.parseTime = -0.0;
+ this.parse2Time = -0.0;
+ this.resolutionTime = -0.0;
+ this.scopeResolutionTime = -0.0;
+ this.lazyCompileTime = -0.0;
+ this.compileTime = -0.0;
+
+ // Lazily computed properties.
+ this.firstEventTimestamp = -1;
+ this.firstParseEventTimestamp = -1;
+ this.lastParseTimestamp = -1;
+ this.lastEventTimestamp = -1;
+
+ if (script) this.script.addFunktion(this);
+ }
+
+ getMetricBytes(name) {
+ if (name == 'lazyCompileTimestamp') return this.getOwnBytes();
+ return this.getBytes();
+ }
+
+ getMetricTime(name) {
+ if (name in kNoTimeMetrics) return 0;
+ return this[name];
+ }
+
+ getFirstEvent() {
+ if (this.firstEventTimestamp === -1) {
+ this.firstEventTimestamp = timestampMin(
+ [this.parseTimestamp, this.preparseTimestamp,
+ this.resolutionTimestamp, this.executionTimestamp
+ ]);
+ if (!(this.firstEventTimestamp > 0)) {
+ this.firstEventTimestamp = 0;
+ }
+ }
+ return this.firstEventTimestamp;
+ }
+
+ getFirstParseEvent() {
+ if (this.firstParseEventTimestamp === -1) {
+ this.firstParseEventTimestamp = timestampMin(
+ [this.parseTimestamp, this.preparseTimestamp,
+ this.resolutionTimestamp
+ ]);
+ if (!(this.firstParseEventTimestamp > 0)) {
+ this.firstParseEventTimestamp = 0;
+ }
+ }
+ return this.firstParseEventTimestamp;
+ }
+
+ getLastParseEvent() {
+ if (this.lastParseTimestamp === -1) {
+ this.lastParseTimestamp = Math.max(
+ this.preparseTimestamp + this.preparseTime,
+ this.parseTimestamp + this.parseTime,
+ this.resolutionTimestamp + this.resolutionTime);
+ if (!(this.lastParseTimestamp > 0)) {
+ this.lastParseTimestamp = 0;
+ }
+ }
+ return this.lastParseTimestamp;
+ }
+
+ getLastEvent() {
+ if (this.lastEventTimestamp === -1) {
+ this.lastEventTimestamp = Math.max(
+ this.getLastParseEvent(), this.executionTimestamp);
+ if (!(this.lastEventTimestamp > 0)) {
+ this.lastEventTimestamp = 0;
+ }
+ }
+ return this.lastEventTimestamp;
+ }
+
+ isNestedIn(funktion) {
+ if (this.script != funktion.script) throw "Incompatible script";
+ return funktion.start < this.start && this.end <= funktion.end;
+ }
+
+ isToplevel() {
+ return this.parent === null
+ }
+
+ hasBeenExecuted() {
+ return this.executionTimestamp > 0
+ }
+
+ accumulateNestingLevel(accumulator) {
+ let value = accumulator[this.nestingLevel] || 0;
+ accumulator[this.nestingLevel] = value + this.getOwnBytes();
+ }
+
+ addNestedFunktion(child) {
+ if (this.script != child.script) throw "Incompatible script";
+ if (child == null) throw "Nesting non child";
+ this.nested.push(child);
+ if (this.nested.length > 1) {
+ // Make sure the nested children don't overlap and have been inserted in
+ // byte start position order.
+ let last = this.nested[this.nested.length - 2];
+ if (last.end > child.start || last.start > child.start ||
+ last.end > child.end || last.start > child.end) {
+ throw "Wrongly nested child added";
+ }
+ }
+ child.nestingLevel = this.nestingLevel + 1;
+ return child.nestingLevel;
+ }
+
+ getBytes() {
+ return this.end - this.start;
+ }
+
+ getOwnBytes() {
+ if (this.ownBytes === -1) {
+ this.ownBytes = this.nested.reduce(
+ (bytes, each) => bytes - each.getBytes(),
+ this.getBytes());
+ if (this.ownBytes < 0) throw "Own bytes must be positive";
+ }
+ return this.ownBytes;
+ }
+
+ didMetricChange(time, delta, name) {
+ let value = this[name + 'Timestamp'];
+ return (time - delta) <= value && value <= (time + delta);
+ }
+
+ print() {
+ console.log(this.toString());
+ }
+
+ toString(details = true) {
+ let result = 'function' + (this.name ? ' ' + this.name : '') +
+ `() range=${this.start}-${this.end}`;
+ if (details) result += ` script=${this.script ? this.script.id : 'X'}`;
+ return result;
+ }
+}
+
+
+// ===========================================================================
+
+const kTimestampFactor = 1000;
+const kSecondsToMillis = 1000;
+
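+// Log timestamps are in microseconds; toTimestamp() converts them to
+// milliseconds, the time unit used throughout this file.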
+function toTimestamp(microseconds) {
+ return microseconds / kTimestampFactor
+}
+
+function startOf(timestamp, time) {
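+ // The logged timestamp marks the event's end; subtract the duration to
+ // recover the start.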
+ let result = toTimestamp(timestamp) - time;
+ if (result < 0) throw "start timestamp cannot be negative";
+ return result;
+}
+
+
+class ParseProcessor extends LogReader {
+ constructor() {
+ super();
+ let config = (processor) => {
+ // {script file},{script id},{start position},{end position},
+ // {time},{timestamp},{function name}
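+ // A hypothetical example line:
+ //   parse-function,app.js,13,10,250,0.35,104385,foo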
+ return {
+ parsers: [null, parseInt, parseInt, parseInt, parseFloat, parseInt, null],
+ processor: processor
+ }
+ };
+
+ this.dispatchTable_ = {
+ 'parse-full': config(this.processFull),
+ 'parse-function': config(this.processFunction),
+ 'parse-script': config(this.processScript),
+ 'parse-eval': config(this.processEval),
+ 'preparse-no-resolution': config(this.processPreparseNoResolution),
+ 'preparse-resolution': config(this.processPreparseResolution),
+ 'first-execution': config(this.processFirstExecution),
+ 'compile-lazy': config(this.processCompileLazy),
+ 'compile': config(this.processCompile)
+ };
+
+ this.idToScript = new Map();
+ this.fileToScript = new Map();
+ this.nameToFunction = new Map();
+ this.scripts = [];
+ this.totalScript = new TotalScript();
+ this.firstEvent = -1;
+ this.lastParseEvent = -1;
+ this.lastEvent = -1;
+ }
+
+ print() {
+ console.log("scripts:");
+ this.idToScript.forEach(script => script.print());
+ }
+
+ processString(string) {
+ let end = string.length;
+ let current = 0;
+ let next = 0;
+ let line;
+ let i = 0;
+ let entry;
+ while (current < end) {
+ next = string.indexOf("\n", current);
+ if (next === -1) break;
+ i++;
+ line = string.substring(current, next);
+ current = next + 1;
+ this.processLogLine(line);
+ }
+ this.postProcess();
+ }
+
+ processLogFile(fileName) {
+ this.collectEntries = true;
+ this.lastLogFileName_ = fileName;
+ var line;
+ while (line = readline()) {
+ this.processLogLine(line);
+ }
+ this.postProcess();
+ }
+
+ postProcess() {
+ this.scripts = Array.from(this.idToScript.values())
+ .filter(each => !each.isNative);
+
+ this.scripts.forEach(script => script.finalize());
+ this.scripts.forEach(script => script.calculateMetrics(false));
+
+ this.firstEvent =
+ timestampMin(this.scripts.map(each => each.firstEvent));
+ this.lastParseEvent = this.scripts.reduce(
+ (max, script) => Math.max(max, script.lastParseEvent), -1);
+ this.lastEvent = this.scripts.reduce(
+ (max, script) => Math.max(max, script.lastEvent), -1);
+
+ this.scripts.forEach(script => this.totalScript.addAllFunktions(script));
+ this.totalScript.calculateMetrics(true);
+ const series = [
+ ['firstParseEvent', 'Any Parse Event'],
+ ['parse', 'Parsing'],
+ ['preparse', 'Preparsing'],
+ ['resolution', 'Preparsing with Var. Resolution'],
+ ['lazyCompile', 'Lazy Compilation'],
+ ['compile', 'Eager Compilation'],
+ ['execution', 'First Execution'],
+ ];
+ let metrics = series.map(each => each[0]);
+ this.totalScript.getAccumulatedTimeMetrics(metrics, 0, this.lastEvent, 10);
+ };
+
+ addEntry(entry) {
+ this.entries.push(entry);
+ }
+
+ lookupScript(file, id) {
+ // During preparsing we only have the temporary ranges and no script yet.
+ let script;
+ if (this.idToScript.has(id)) {
+ script = this.idToScript.get(id);
+ } else {
+ script = new Script(file, id);
+ this.idToScript.set(id, script);
+ }
+ if (file.length > 0 && script.file.length === 0) {
+ script.setFile(file);
+ this.fileToScript.set(file, script);
+ }
+ return script;
+ }
+
+ lookupFunktion(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let script = this.lookupScript(file, scriptId);
+ let funktion = script.funktionAtPosition(startPosition);
+ if (funktion === void 0) {
+ funktion = new Funktion(functionName, startPosition, endPosition, script);
+ }
+ return funktion;
+ }
+
+ processEval(file, scriptId, startPosition,
+ endPosition, time, timestamp, functionName) {
+ let script = this.lookupScript(file, scriptId);
+ script.isEval = true;
+ }
+
+ processFull(file, scriptId, startPosition,
+ endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+ // TODO(cbruni): this should never happen; emit a different event from the
+ // parser.
+ if (funktion.parseTimestamp > 0) return;
+ funktion.parseTimestamp = startOf(timestamp, time);
+ funktion.parseTime = time;
+ }
+
+ processFunction(file, scriptId, startPosition,
+ endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+ funktion.parseTimestamp = startOf(timestamp, time);
+ funktion.parseTime = time;
+ }
+
+ processScript(file, scriptId, startPosition,
+ endPosition, time, timestamp, functionName) {
+ // TODO timestamp and time
+ let script = this.lookupScript(file, scriptId);
+ let ts = startOf(timestamp, time);
+ script.parseTimestamp = ts;
+ script.firstEventTimestamp = ts;
+ script.firstParseEventTimestamp = ts;
+ script.parseTime = time;
+ }
+
+ processPreparseResolution(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+ // TODO(cbruni): this should never happen, emit different event from the
+ // parser.
+ if (funktion.resolutionTimestamp > 0) return;
+ funktion.resolutionTimestamp = startOf(timestamp, time);
+ funktion.resolutionTime = time;
+ }
+
+ processPreparseNoResolution(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+ funktion.preparseTimestamp = startOf(timestamp, time);
+ funktion.preparseTime = time;
+ }
+
+ processFirstExecution(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let script = this.lookupScript(file, scriptId);
+ if (startPosition === 0) {
+ // undefined = eval fn execution
+ if (script) {
+ script.executionTimestamp = toTimestamp(timestamp);
+ }
+ } else {
+ let funktion = script.funktionAtPosition(startPosition);
+ if (funktion) {
+ funktion.executionTimestamp = toTimestamp(timestamp);
+ } else if (functionName.length > 0) {
+ // throw new Error("Could not find function: " + functionName);
+ }
+ }
+ }
+
+ processCompileLazy(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+ funktion.lazyCompileTimestamp = startOf(timestamp, time);
+ funktion.lazyCompileTime = time;
+ }
+
+ processCompile(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+
+ let script = this.lookupScript(file, scriptId);
+ if (startPosition === 0) {
+ script.compileTimestamp = startOf(timestamp, time);
+ script.compileTime = time;
+ script.bytesTotal = endPosition;
+ } else {
+ let funktion = script.funktionAtPosition(startPosition);
+ funktion.compileTimestamp = startOf(timestamp, time);
+ funktion.compileTime = time;
+ }
+ }
+}
+
+
+class ArgumentsProcessor extends BaseArgumentsProcessor {
+ getArgsDispatch() {
+ return {};
+ }
+
+ getDefaultResults() {
+ return {
+ logFileName: 'v8.log',
+ range: 'auto,auto',
+ };
+ }
+}
diff --git a/deps/v8/tools/perf-compare.py b/deps/v8/tools/perf-compare.py
index b7a795b453..75f3c73c6a 100755
--- a/deps/v8/tools/perf-compare.py
+++ b/deps/v8/tools/perf-compare.py
@@ -12,7 +12,6 @@ Examples:
'''
from collections import OrderedDict
-import commands
import json
import math
from argparse import ArgumentParser
diff --git a/deps/v8/tools/perf-to-html.py b/deps/v8/tools/perf-to-html.py
index ac9f53f617..e3979360a7 100755
--- a/deps/v8/tools/perf-to-html.py
+++ b/deps/v8/tools/perf-to-html.py
@@ -12,7 +12,6 @@ from standard input or via the --filename option. Examples:
%prog -f results.json -t "ia32 results" -o results.html
'''
-import commands
import json
import math
from optparse import OptionParser
diff --git a/deps/v8/tools/predictable_wrapper.py b/deps/v8/tools/predictable_wrapper.py
new file mode 100644
index 0000000000..cf7bf00b3f
--- /dev/null
+++ b/deps/v8/tools/predictable_wrapper.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Wrapper script for verify-predictable mode. D8 is expected to be compiled with
+v8_enable_verify_predictable.
+
+The actual test command is expected to be passed to this wrapper as-is, e.g.:
+predictable_wrapper.py path/to/d8 --test --predictable --flag1 --flag2
+
+The command is run up to three times and the printed allocation hash is
+compared. Differences are reported as errors.
+"""
+
+import sys
+
+from testrunner.local import command
+
+MAX_TRIES = 3
+
+def main(args):
+ def allocation_str(stdout):
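+ # Scan stdout bottom-up for the '### Allocations = ' hash line printed by
+ # verify-predictable builds.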
+ for line in reversed((stdout or '').splitlines()):
+ if line.startswith('### Allocations = '):
+ return line
+ return None
+
+ cmd = command.Command(args[0], args[1:])
+
+ previous_allocations = None
+ for run in range(1, MAX_TRIES + 1):
+ print '### Predictable run #%d' % run
+ output = cmd.execute()
+ if output.stdout:
+ print '### Stdout:'
+ print output.stdout
+ if output.stderr:
+ print '### Stderr:'
+ print output.stderr
+ print '### Return code: %s' % output.exit_code
+ if output.HasTimedOut():
+ # If we get a timeout in any run, we are in an unpredictable state. Just
+ # report it as a failure and don't rerun.
+ print '### Test timed out'
+ return 1
+ allocations = allocation_str(output.stdout)
+ if not allocations:
+ print ('### Test had no allocation output. Ensure this is built '
+ 'with v8_enable_verify_predictable and that '
+ '--verify-predictable is passed at the cmd line.')
+ return 2
+ if previous_allocations and previous_allocations != allocations:
+ print '### Allocations differ'
+ return 3
+ if run >= MAX_TRIES:
+ # No difference on the last run -> report a success.
+ return 0
+ previous_allocations = allocations
+ # Unreachable.
+ assert False
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 2290422459..9ac26ddb16 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -554,9 +554,15 @@ def CheckDeps(workspace):
def PyTests(workspace):
- test_scripts = join(workspace, 'tools', 'release', 'test_scripts.py')
- return subprocess.call(
- [sys.executable, test_scripts], stdout=subprocess.PIPE) == 0
+ result = True
+ for script in [
+ join(workspace, 'tools', 'release', 'test_scripts.py'),
+ join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
+ ]:
+ print 'Running ' + script
+ result &= subprocess.call(
+ [sys.executable, script], stdout=subprocess.PIPE) == 0
+ return result
def GetOptions():
@@ -573,8 +579,8 @@ def Main():
success = True
print "Running checkdeps..."
success &= CheckDeps(workspace)
- print "Running C++ lint check..."
if not options.no_lint:
+ print "Running C++ lint check..."
success &= CppLintProcessor().RunOnPath(workspace)
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
diff --git a/deps/v8/tools/process-heap-prof.py b/deps/v8/tools/process-heap-prof.py
deleted file mode 100755
index a26cbf1589..0000000000
--- a/deps/v8/tools/process-heap-prof.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This is an utility for converting V8 heap logs into .hp files that can
-# be further processed using 'hp2ps' tool (bundled with GHC and Valgrind)
-# to produce heap usage histograms.
-
-# Sample usage:
-# $ ./shell --log-gc script.js
-# $ tools/process-heap-prof.py v8.log | hp2ps -c > script-heap-graph.ps
-# ('-c' enables color, see hp2ps manual page for more options)
-# or
-# $ tools/process-heap-prof.py --js-cons-profile v8.log | hp2ps -c > script-heap-graph.ps
-# to get JS constructor profile
-
-
-import csv, sys, time, optparse
-
-def ProcessLogFile(filename, options):
- if options.js_cons_profile:
- itemname = 'heap-js-cons-item'
- else:
- itemname = 'heap-sample-item'
-
- first_call_time = None
- sample_time = 0.0
- sampling = False
- try:
- logfile = open(filename, 'rb')
- try:
- logreader = csv.reader(logfile)
-
- print('JOB "v8"')
- print('DATE "%s"' % time.asctime(time.localtime()))
- print('SAMPLE_UNIT "seconds"')
- print('VALUE_UNIT "bytes"')
-
- for row in logreader:
- if row[0] == 'heap-sample-begin' and row[1] == 'Heap':
- sample_time = float(row[3])/1000.0
- if first_call_time == None:
- first_call_time = sample_time
- sample_time -= first_call_time
- print('BEGIN_SAMPLE %.2f' % sample_time)
- sampling = True
- elif row[0] == 'heap-sample-end' and row[1] == 'Heap':
- print('END_SAMPLE %.2f' % sample_time)
- sampling = False
- elif row[0] == itemname and sampling:
- print(row[1]),
- if options.count:
- print('%d' % (int(row[2]))),
- if options.size:
- print('%d' % (int(row[3]))),
- print
- finally:
- logfile.close()
- except:
- sys.exit('can\'t open %s' % filename)
-
-
-def BuildOptions():
- result = optparse.OptionParser()
- result.add_option("--js_cons_profile", help="Constructor profile",
- default=False, action="store_true")
- result.add_option("--size", help="Report object size",
- default=False, action="store_true")
- result.add_option("--count", help="Report object count",
- default=False, action="store_true")
- return result
-
-
-def ProcessOptions(options):
- if not options.size and not options.count:
- options.size = True
- return True
-
-
-def Main():
- parser = BuildOptions()
- (options, args) = parser.parse_args()
- if not ProcessOptions(options):
- parser.print_help()
- sys.exit();
-
- if not args:
- print "Missing logfile"
- sys.exit();
-
- ProcessLogFile(args[0], options)
-
-
-if __name__ == '__main__':
- sys.exit(Main())
diff --git a/deps/v8/tools/run-num-fuzzer.isolate b/deps/v8/tools/run-num-fuzzer.isolate
index 4bd3d8b6c0..d0aca421a7 100644
--- a/deps/v8/tools/run-num-fuzzer.isolate
+++ b/deps/v8/tools/run-num-fuzzer.isolate
@@ -14,6 +14,7 @@
'includes': [
'testrunner/testrunner.isolate',
'../src/d8.isolate',
+ '../test/benchmarks/benchmarks.isolate',
'../test/mjsunit/mjsunit.isolate',
'../test/webkit/webkit.isolate',
],
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 0f1646d9ea..3823eb510c 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -106,7 +106,7 @@ import re
import subprocess
import sys
-from testrunner.local import commands
+from testrunner.local import command
from testrunner.local import utils
ARCH_GUESS = utils.DefaultArch()
@@ -493,15 +493,23 @@ class RunnableConfig(GraphConfig):
suffix = ["--"] + self.test_flags if self.test_flags else []
return self.flags + (extra_flags or []) + [self.main] + suffix
- def GetCommand(self, shell_dir, extra_flags=None):
+ def GetCommand(self, cmd_prefix, shell_dir, extra_flags=None):
# TODO(machenbach): This requires +.exe if run on windows.
extra_flags = extra_flags or []
- cmd = [os.path.join(shell_dir, self.binary)]
- if self.binary.endswith(".py"):
- cmd = [sys.executable] + cmd
if self.binary != 'd8' and '--prof' in extra_flags:
print "Profiler supported only on a benchmark run with d8"
- return cmd + self.GetCommandFlags(extra_flags=extra_flags)
+
+ if self.process_size:
+ cmd_prefix = ["/usr/bin/time", "--format=MaxMemory: %MKB"] + cmd_prefix
+ if self.binary.endswith('.py'):
+ # Copy cmd_prefix instead of update (+=).
+ cmd_prefix = cmd_prefix + [sys.executable]
+
+ return command.Command(
+ cmd_prefix=cmd_prefix,
+ shell=os.path.join(shell_dir, self.binary),
+ args=self.GetCommandFlags(extra_flags=extra_flags),
+ timeout=self.timeout or 60)
def Run(self, runner, trybot):
"""Iterates over several runs and handles the output for all traces."""
@@ -677,18 +685,9 @@ class DesktopPlatform(Platform):
suffix = ' - secondary' if secondary else ''
shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
- if runnable.process_size:
- command = ["/usr/bin/time", "--format=MaxMemory: %MKB"]
- else:
- command = []
-
- command += self.command_prefix + runnable.GetCommand(shell_dir,
- self.extra_flags)
+ cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
try:
- output = commands.Execute(
- command,
- timeout=runnable.timeout,
- )
+ output = cmd.execute()
except OSError as e: # pragma: no cover
print title % "OSError"
print e
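
The run_perf.py hunks above replace the flat argument list that used to go
through commands.Execute with a command.Command object bundling the prefix,
shell, args, and timeout, so the /usr/bin/time memory-measurement prefix is
now composed inside GetCommand. A minimal sketch of the resulting call shape,
assuming an x64 release d8 and placeholder flags:

    # Illustrative only; the shell path and flags are placeholders.
    cmd = command.Command(
        cmd_prefix=['/usr/bin/time', '--format=MaxMemory: %MKB'],  # process_size
        shell='out/x64.release/d8',
        args=['--expose-gc', 'benchmark.js'],
        timeout=60)
    output = cmd.execute()  # an objects.output.Output that carries duration
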
diff --git a/deps/v8/tools/testrunner/PRESUBMIT.py b/deps/v8/tools/testrunner/PRESUBMIT.py
new file mode 100644
index 0000000000..7f7596a85d
--- /dev/null
+++ b/deps/v8/tools/testrunner/PRESUBMIT.py
@@ -0,0 +1,8 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+def CheckChangeOnCommit(input_api, output_api):
+ tests = input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api, output_api, '../unittests', whitelist=['run_tests_test.py$'])
+ return input_api.RunTests(tests)
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index b6ef6fb5cd..8fc09eed7b 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -3,6 +3,7 @@
# found in the LICENSE file.
+from collections import OrderedDict
import json
import optparse
import os
@@ -16,8 +17,11 @@ sys.path.insert(
os.path.dirname(os.path.abspath(__file__))))
+from local import testsuite
from local import utils
+from testproc.shard import ShardProc
+
BASE_DIR = (
os.path.dirname(
@@ -62,6 +66,16 @@ TEST_MAP = {
"intl",
"unittests",
],
+ # This needs to stay in sync with test/d8_default.isolate.
+ "d8_default": [
+ # TODO(machenbach): uncomment after infra side lands.
+ #"debugger",
+ "mjsunit",
+ "webkit",
+ #"message",
+ #"preparser",
+ #"intl",
+ ],
# This needs to stay in sync with test/optimize_for_size.isolate.
"optimize_for_size": [
"debugger",
@@ -177,16 +191,19 @@ class BuildConfig(object):
class BaseTestRunner(object):
- def __init__(self):
+ def __init__(self, basedir=None):
+ self.basedir = basedir or BASE_DIR
self.outdir = None
self.build_config = None
self.mode_name = None
self.mode_options = None
- def execute(self):
+ def execute(self, sys_args=None):
+ if sys_args is None: # pragma: no cover
+ sys_args = sys.argv[1:]
try:
parser = self._create_parser()
- options, args = self._parse_args(parser)
+ options, args = self._parse_args(parser, sys_args)
self._load_build_config(options)
@@ -197,10 +214,15 @@ class BaseTestRunner(object):
parser.print_help()
raise
+ args = self._parse_test_args(args)
+ suites = self._get_suites(args, options.verbose)
+
self._setup_env()
- return self._do_execute(options, args)
+ return self._do_execute(suites, args, options)
except TestRunnerError:
return 1
+ except KeyboardInterrupt:
+ return 2
def _create_parser(self):
parser = optparse.OptionParser()
@@ -227,15 +249,21 @@ class BaseTestRunner(object):
"directory will be used")
parser.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
+ parser.add_option("--shard-count",
+ help="Split tests into this number of shards",
+ default=1, type="int")
+ parser.add_option("--shard-run",
+ help="Run this shard from the split up tests.",
+ default=1, type="int")
def _add_parser_options(self, parser):
pass
- def _parse_args(self, parser):
- options, args = parser.parse_args()
+ def _parse_args(self, parser, sys_args):
+ options, args = parser.parse_args(sys_args)
if any(map(lambda v: v and ',' in v,
- [options.arch, options.mode])):
+ [options.arch, options.mode])): # pragma: no cover
print 'Multiple arch/mode are deprecated'
raise TestRunnerError()
@@ -248,7 +276,7 @@ class BaseTestRunner(object):
except TestRunnerError:
pass
- if not self.build_config:
+ if not self.build_config: # pragma: no cover
print 'Failed to load build config'
raise TestRunnerError
@@ -274,14 +302,14 @@ class BaseTestRunner(object):
'%s.%s' % (options.arch, options.mode))
for outdir in outdirs():
- yield os.path.join(BASE_DIR, outdir)
+ yield os.path.join(self.basedir, outdir)
# buildbot option
if options.mode:
- yield os.path.join(BASE_DIR, outdir, options.mode)
+ yield os.path.join(self.basedir, outdir, options.mode)
def _get_gn_outdir(self):
- gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
+ gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
latest_timestamp = -1
latest_config = None
for gn_config in os.listdir(gn_out_dir):
@@ -305,7 +333,7 @@ class BaseTestRunner(object):
with open(build_config_path) as f:
try:
build_config_json = json.load(f)
- except Exception:
+ except Exception: # pragma: no cover
print("%s exists but contains invalid json. Is your build up-to-date?"
% build_config_path)
raise TestRunnerError()
@@ -324,7 +352,7 @@ class BaseTestRunner(object):
build_config_mode = 'debug' if self.build_config.is_debug else 'release'
if options.mode:
- if options.mode not in MODES:
+ if options.mode not in MODES: # pragma: no cover
print '%s mode is invalid' % options.mode
raise TestRunnerError()
if MODES[options.mode].execution_mode != build_config_mode:
@@ -346,7 +374,7 @@ class BaseTestRunner(object):
options.arch, self.build_config.arch))
raise TestRunnerError()
- if options.shell_dir:
+ if options.shell_dir: # pragma: no cover
print('Warning: --shell-dir is deprecated. Searching for executables in '
'build directory (%s) instead.' % self.outdir)
@@ -364,7 +392,7 @@ class BaseTestRunner(object):
def _setup_env(self):
# Use the v8 root as cwd as some test cases use "load" with relative paths.
- os.chdir(BASE_DIR)
+ os.chdir(self.basedir)
# Many tests assume an English interface.
os.environ['LANG'] = 'en_US.UTF-8'
@@ -403,7 +431,7 @@ class BaseTestRunner(object):
if self.build_config.tsan:
suppressions_file = os.path.join(
- BASE_DIR,
+ self.basedir,
'tools',
'sanitizers',
'tsan_suppressions.txt')
@@ -418,7 +446,7 @@ class BaseTestRunner(object):
def _get_external_symbolizer_option(self):
external_symbolizer_path = os.path.join(
- BASE_DIR,
+ self.basedir,
'third_party',
'llvm-build',
'Release+Asserts',
@@ -432,7 +460,84 @@ class BaseTestRunner(object):
return 'external_symbolizer_path=%s' % external_symbolizer_path
+ def _parse_test_args(self, args):
+ if not args:
+ args = self._get_default_suite_names()
+
+    # Expand arguments with grouped tests. The resulting args should reflect
+    # the list of suites, as filters would otherwise break.
+ def expand_test_group(name):
+ return TEST_MAP.get(name, [name])
+
+ return reduce(list.__add__, map(expand_test_group, args), [])
+
+ def _get_suites(self, args, verbose=False):
+ names = self._args_to_suite_names(args)
+ return self._load_suites(names, verbose)
+
+ def _args_to_suite_names(self, args):
+ # Use default tests if no test configuration was provided at the cmd line.
+ all_names = set(utils.GetSuitePaths(os.path.join(self.basedir, 'test')))
+ args_names = OrderedDict([(arg.split('/')[0], None) for arg in args]) # set
+ return [name for name in args_names if name in all_names]
+
+ def _get_default_suite_names(self):
+ return []
+
+ def _expand_test_group(self, name):
+ return TEST_MAP.get(name, [name])
+
+ def _load_suites(self, names, verbose=False):
+ def load_suite(name):
+ if verbose:
+ print '>>> Loading test suite: %s' % name
+ return testsuite.TestSuite.LoadTestSuite(
+ os.path.join(self.basedir, 'test', name))
+ return map(load_suite, names)
# TODO(majeski): remove options & args parameters
- def _do_execute(self, options, args):
+ def _do_execute(self, suites, args, options):
raise NotImplementedError()
+
+ def _create_shard_proc(self, options):
+ myid, count = self._get_shard_info(options)
+ if count == 1:
+ return None
+ return ShardProc(myid - 1, count)
+
+ def _get_shard_info(self, options):
+ """
+    Returns a pair:
+    (id of the current shard, within [1; number of shards], number of shards)
+ """
+ # Read gtest shard configuration from environment (e.g. set by swarming).
+ # If none is present, use values passed on the command line.
+ shard_count = int(
+ os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
+ shard_run = os.environ.get('GTEST_SHARD_INDEX')
+ if shard_run is not None:
+ # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
+ shard_run = int(shard_run) + 1
+ else:
+ shard_run = options.shard_run
+
+ if options.shard_count > 1:
+ # Log if a value was passed on the cmd line and it differs from the
+ # environment variables.
+ if options.shard_count != shard_count: # pragma: no cover
+ print("shard_count from cmd line differs from environment variable "
+ "GTEST_TOTAL_SHARDS")
+ if (options.shard_run > 1 and
+ options.shard_run != shard_run): # pragma: no cover
+ print("shard_run from cmd line differs from environment variable "
+ "GTEST_SHARD_INDEX")
+
+ if shard_run < 1 or shard_run > shard_count:
+ # TODO(machenbach): Turn this into an assert. If that's wrong on the
+ # bots, printing will be quite useless. Or refactor this code to make
+ # sure we get a return code != 0 after testing if we got here.
+ print "shard-run not a valid number, should be in [1:shard-count]"
+ print "defaulting back to running all tests"
+ return 1, 1
+
+ return shard_run, shard_count
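
The sharding helpers added above give swarming's environment variables
precedence over the command line and translate the 0-based GTEST_SHARD_INDEX
into the runner's 1-based shard_run. The same precedence as a standalone
sketch (the function name is illustrative):

    import os

    def get_shard_info(shard_count_opt=1, shard_run_opt=1):
        # Environment set by swarming wins over command-line options.
        count = int(os.environ.get('GTEST_TOTAL_SHARDS', shard_count_opt))
        run = os.environ.get('GTEST_SHARD_INDEX')
        # GTEST_SHARD_INDEX is 0-based; shard_run is 1-based.
        run = int(run) + 1 if run is not None else shard_run_opt
        if run < 1 or run > count:
            return 1, 1  # out of range: fall back to running all tests
        return run, count
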
diff --git a/deps/v8/tools/testrunner/deopt_fuzzer.py b/deps/v8/tools/testrunner/deopt_fuzzer.py
index 75878d442c..5e6b79f5e9 100755
--- a/deps/v8/tools/testrunner/deopt_fuzzer.py
+++ b/deps/v8/tools/testrunner/deopt_fuzzer.py
@@ -26,7 +26,7 @@ from testrunner.local import verbose
from testrunner.objects import context
-DEFAULT_TESTS = ["mjsunit", "webkit"]
+DEFAULT_SUITES = ["mjsunit", "webkit"]
TIMEOUT_DEFAULT = 60
# Double the timeout for these:
@@ -37,8 +37,8 @@ DISTRIBUTION_MODES = ["smooth", "random"]
class DeoptFuzzer(base_runner.BaseTestRunner):
- def __init__(self):
- super(DeoptFuzzer, self).__init__()
+ def __init__(self, *args, **kwargs):
+ super(DeoptFuzzer, self).__init__(*args, **kwargs)
class RandomDistribution:
def __init__(self, seed=None):
@@ -136,12 +136,6 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
- parser.add_option("--shard-count",
- help="Split testsuites into this number of shards",
- default=1, type="int")
- parser.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
parser.add_option("--seed", help="The seed for the random distribution",
type="int")
parser.add_option("-t", "--timeout", help="Timeout in seconds",
@@ -184,47 +178,6 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
options.coverage_lift = 0
return True
- def _shard_tests(self, tests, shard_count, shard_run):
- if shard_count < 2:
- return tests
- if shard_run < 1 or shard_run > shard_count:
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
- return tests
- count = 0
- shard = []
- for test in tests:
- if count % shard_count == shard_run - 1:
- shard.append(test)
- count += 1
- return shard
-
- def _do_execute(self, options, args):
- suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
-
- if len(args) == 0:
- suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
- else:
- args_suites = set()
- for arg in args:
- suite = arg.split(os.path.sep)[0]
- if not suite in args_suites:
- args_suites.add(suite)
- suite_paths = [ s for s in suite_paths if s in args_suites ]
-
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(base_runner.BASE_DIR, "test", root))
- if suite:
- suites.append(suite)
-
- try:
- return self._execute(args, options, suites)
- except KeyboardInterrupt:
- return 2
-
-
def _calculate_n_tests(self, m, options):
"""Calculates the number of tests from m deopt points with exponential
coverage.
@@ -235,8 +188,10 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
l = float(options.coverage_lift)
return int(math.pow(m, (m * c + l) / (m + l)))
+ def _get_default_suite_names(self):
+ return DEFAULT_SUITES
- def _execute(self, args, options, suites):
+ def _do_execute(self, suites, args, options):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
@@ -264,7 +219,6 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
True, # No sorting of test cases.
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
- False, # No predictable mode.
False, # No no_harness mode.
False, # Don't use perf data.
False) # Coverage not supported.
@@ -305,16 +259,16 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
s.FilterTestCasesByStatus(False)
- for t in s.tests:
- t.flags += s.GetStatusfileFlags(t)
test_backup[s] = s.tests
analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
"--print-deopt-stress"]
- s.tests = [t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests]
+ s.tests = [t.create_variant(t.variant, analysis_flags, 'analysis')
+ for t in s.tests]
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
+ t.cmd = t.get_command(ctx)
test_id += 1
if num_tests == 0:
@@ -333,7 +287,7 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
for s in suites:
test_results = {}
for t in s.tests:
- for line in t.output.stdout.splitlines():
+ for line in runner.outputs[t].stdout.splitlines():
if line.startswith("=== Stress deopt counter: "):
test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
for t in s.tests:
@@ -357,17 +311,18 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
distribution = dist.Distribute(n_deopt, max_deopt)
if options.verbose:
print "%s %s" % (t.path, distribution)
- for i in distribution:
- fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
- s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
+ for n, d in enumerate(distribution):
+ fuzzing_flags = ["--deopt-every-n-times", "%d" % d]
+ s.tests.append(t.create_variant(t.variant, fuzzing_flags, n))
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
+ t.cmd = t.get_command(ctx)
test_id += 1
if num_tests == 0:
print "No tests to run."
- return 0
+ return exit_code
print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
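
_calculate_n_tests, unchanged above, sizes the fuzzing phase as
n = m ** ((m*c + l) / (m + l)) for m deopt points, coverage c, and lift l.
A quick numeric check (the coverage and lift values below are illustrative,
not the runner's defaults):

    import math

    def calculate_n_tests(m, coverage, coverage_lift):
        c, l = float(coverage), float(coverage_lift)
        return int(math.pow(m, (m * c + l) / (m + l)))

    # 100 deopt points at coverage 0.4, lift 0: 100 ** 0.4 -> 6 test cases.
    # A positive lift raises the exponent for tests with few deopt points.
    print(calculate_n_tests(100, 0.4, 0))
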
diff --git a/deps/v8/tools/testrunner/gc_fuzzer.py b/deps/v8/tools/testrunner/gc_fuzzer.py
index 4130fff8be..18be227d98 100755
--- a/deps/v8/tools/testrunner/gc_fuzzer.py
+++ b/deps/v8/tools/testrunner/gc_fuzzer.py
@@ -27,7 +27,7 @@ from testrunner.local import verbose
from testrunner.objects import context
-DEFAULT_TESTS = ["mjsunit", "webkit"]
+DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
TIMEOUT_DEFAULT = 60
# Double the timeout for these:
@@ -36,8 +36,8 @@ SLOW_ARCHS = ["arm",
class GCFuzzer(base_runner.BaseTestRunner):
- def __init__(self):
- super(GCFuzzer, self).__init__()
+ def __init__(self, *args, **kwargs):
+ super(GCFuzzer, self).__init__(*args, **kwargs)
self.fuzzer_rng = None
@@ -64,12 +64,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
- parser.add_option("--shard-count",
- help="Split testsuites into this number of shards",
- default=1, type="int")
- parser.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
parser.add_option("-t", "--timeout", help="Timeout in seconds",
default= -1, type="int")
parser.add_option("--random-seed", default=0,
@@ -102,47 +96,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
self.fuzzer_rng = random.Random(options.fuzzer_random_seed)
return True
- def _shard_tests(self, tests, shard_count, shard_run):
- if shard_count < 2:
- return tests
- if shard_run < 1 or shard_run > shard_count:
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
- return tests
- count = 0
- shard = []
- for test in tests:
- if count % shard_count == shard_run - 1:
- shard.append(test)
- count += 1
- return shard
-
- def _do_execute(self, options, args):
- suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
-
- if len(args) == 0:
- suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
- else:
- args_suites = set()
- for arg in args:
- suite = arg.split(os.path.sep)[0]
- if not suite in args_suites:
- args_suites.add(suite)
- suite_paths = [ s for s in suite_paths if s in args_suites ]
-
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(base_runner.BASE_DIR, "test", root))
- if suite:
- suites.append(suite)
-
- try:
- return self._execute(args, options, suites)
- except KeyboardInterrupt:
- return 2
-
-
def _calculate_n_tests(self, m, options):
"""Calculates the number of tests from m points with exponential coverage.
The coverage is expected to be between 0.0 and 1.0.
@@ -152,8 +105,10 @@ class GCFuzzer(base_runner.BaseTestRunner):
l = float(options.coverage_lift)
return int(math.pow(m, (m * c + l) / (m + l)))
+ def _get_default_suite_names(self):
+ return DEFAULT_SUITES
- def _execute(self, args, options, suites):
+ def _do_execute(self, suites, args, options):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
@@ -179,7 +134,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
True, # No sorting of test cases.
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
- False, # No predictable mode.
False, # No no_harness mode.
False, # Don't use perf data.
False) # Coverage not supported.
@@ -193,14 +147,12 @@ class GCFuzzer(base_runner.BaseTestRunner):
print('>>> Collection phase')
for s in suites:
- analysis_flags = [
- # > 100% to not influence default incremental marking, but we need this
- # flag to print reached incremental marking limit.
- '--stress_marking', '1000',
- '--trace_incremental_marking',
- ]
- s.tests = map(lambda t: t.CopyAddingFlags(t.variant, analysis_flags),
+ analysis_flags = ['--fuzzer-gc-analysis']
+ s.tests = map(lambda t: t.create_variant(t.variant, analysis_flags,
+ 'analysis'),
s.tests)
+ for t in s.tests:
+ t.cmd = t.get_command(ctx)
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
runner = execution.Runner(suites, progress_indicator, ctx)
@@ -211,13 +163,15 @@ class GCFuzzer(base_runner.BaseTestRunner):
for s in suites:
for t in s.tests:
# Skip failed tests.
- if s.HasUnexpectedOutput(t):
+ if t.output_proc.has_unexpected_output(runner.outputs[t]):
print '%s failed, skipping' % t.path
continue
- max_limit = self._get_max_limit_reached(t)
+ max_limit = self._get_max_limit_reached(runner.outputs[t])
if max_limit:
test_results[t.path] = max_limit
+ runner = None
+
if options.dump_results_file:
with file("%s.%d.txt" % (options.dump_results_file, time.time()),
"w") as f:
@@ -237,7 +191,7 @@ class GCFuzzer(base_runner.BaseTestRunner):
if options.verbose:
print ('%s [x%d] (max marking limit=%.02f)' %
(t.path, subtests_count, max_percent))
- for _ in xrange(0, subtests_count):
+ for i in xrange(0, subtests_count):
fuzzer_seed = self._next_fuzzer_seed()
fuzzing_flags = [
'--stress_marking', str(max_percent),
@@ -245,12 +199,14 @@ class GCFuzzer(base_runner.BaseTestRunner):
]
if options.stress_compaction:
fuzzing_flags.append('--stress_compaction_random')
- s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
+ s.tests.append(t.create_variant(t.variant, fuzzing_flags, i))
+ for t in s.tests:
+ t.cmd = t.get_command(ctx)
num_tests += len(s.tests)
if num_tests == 0:
print "No tests to run."
- return 0
+ return exit_code
print(">>> Fuzzing phase (%d test cases)" % num_tests)
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
@@ -292,8 +248,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
s.FilterTestCasesByStatus(False)
- for t in s.tests:
- t.flags += s.GetStatusfileFlags(t)
num_tests += len(s.tests)
for t in s.tests:
@@ -304,31 +258,16 @@ class GCFuzzer(base_runner.BaseTestRunner):
# Parses test stdout and returns what was the highest reached percent of the
# incremental marking limit (0-100).
- # Skips values >=100% since they already trigger incremental marking.
@staticmethod
- def _get_max_limit_reached(test):
- def is_im_line(l):
- return 'IncrementalMarking' in l and '% of the memory limit reached' in l
-
- def line_to_percent(l):
- return filter(lambda part: '%' in part, l.split(' '))[0]
-
- def percent_str_to_float(s):
- return float(s[:-1])
-
- if not (test.output and test.output.stdout):
+ def _get_max_limit_reached(output):
+ if not output.stdout:
return None
- im_lines = filter(is_im_line, test.output.stdout.splitlines())
- percents_str = map(line_to_percent, im_lines)
- percents = map(percent_str_to_float, percents_str)
-
- # Skip >= 100%.
- percents = filter(lambda p: p < 100, percents)
+ for l in reversed(output.stdout.splitlines()):
+ if l.startswith('### Maximum marking limit reached ='):
+ return float(l.split()[6])
- if not percents:
- return None
- return max(percents)
+ return None
def _next_fuzzer_seed(self):
fuzzer_seed = None
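
The rewritten _get_max_limit_reached above no longer filters
IncrementalMarking trace lines; it scans backwards for the single summary
line printed by a --fuzzer-gc-analysis run. Equivalent standalone logic (the
sample value is invented):

    def get_max_limit_reached(stdout):
        # Matches e.g. "### Maximum marking limit reached = 42.50".
        for line in reversed(stdout.splitlines()):
            if line.startswith('### Maximum marking limit reached ='):
                return float(line.split()[6])
        return None

    print(get_max_limit_reached('### Maximum marking limit reached = 42.50'))
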
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
new file mode 100644
index 0000000000..93b1ac9497
--- /dev/null
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -0,0 +1,171 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import os
+import subprocess
+import sys
+import threading
+import time
+
+from ..local import utils
+from ..objects import output
+
+
+SEM_INVALID_VALUE = -1
+SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
+
+
+class BaseCommand(object):
+ def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
+ verbose=False):
+ assert(timeout > 0)
+
+ self.shell = shell
+ self.args = args or []
+ self.cmd_prefix = cmd_prefix or []
+ self.timeout = timeout
+ self.env = env or {}
+ self.verbose = verbose
+
+ def execute(self, **additional_popen_kwargs):
+ if self.verbose:
+ print '# %s' % self
+
+ process = self._start_process(**additional_popen_kwargs)
+
+ # Variable to communicate with the timer.
+    timeout_occurred = [False]
+    timer = threading.Timer(
+        self.timeout, self._on_timeout, [process, timeout_occurred])
+ timer.start()
+
+ start_time = time.time()
+ stdout, stderr = process.communicate()
+ duration = time.time() - start_time
+
+ timer.cancel()
+
+ return output.Output(
+ process.returncode,
+        timeout_occurred[0],
+ stdout.decode('utf-8', 'replace').encode('utf-8'),
+ stderr.decode('utf-8', 'replace').encode('utf-8'),
+ process.pid,
+ duration
+ )
+
+ def _start_process(self, **additional_popen_kwargs):
+ try:
+ return subprocess.Popen(
+ args=self._get_popen_args(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=self._get_env(),
+ **additional_popen_kwargs
+ )
+ except Exception as e:
+ sys.stderr.write('Error executing: %s\n' % self)
+ raise e
+
+ def _get_popen_args(self):
+ return self._to_args_list()
+
+ def _get_env(self):
+ env = os.environ.copy()
+ env.update(self.env)
+    # GTest shard information is read by the V8 test runner. Make sure it
+ # doesn't leak into the execution of gtests we're wrapping. Those might
+ # otherwise apply a second level of sharding and as a result skip tests.
+ env.pop('GTEST_TOTAL_SHARDS', None)
+ env.pop('GTEST_SHARD_INDEX', None)
+ return env
+
+ def _kill_process(self, process):
+ raise NotImplementedError()
+
+  def _on_timeout(self, process, timeout_occurred):
+    timeout_occurred[0] = True
+ try:
+ self._kill_process(process)
+ except OSError:
+ sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+
+ def __str__(self):
+ return self.to_string()
+
+ def to_string(self, relative=False):
+ def escape(part):
+ # Escape spaces. We may need to escape more characters for this to work
+ # properly.
+ if ' ' in part:
+ return '"%s"' % part
+ return part
+
+ parts = map(escape, self._to_args_list())
+ cmd = ' '.join(parts)
+ if relative:
+ cmd = cmd.replace(os.getcwd() + os.sep, '')
+ return cmd
+
+ def _to_args_list(self):
+ return self.cmd_prefix + [self.shell] + self.args
+
+
+class PosixCommand(BaseCommand):
+ def _kill_process(self, process):
+ process.kill()
+
+
+class WindowsCommand(BaseCommand):
+ def _start_process(self, **kwargs):
+ # Try to change the error mode to avoid dialogs on fatal errors. Don't
+ # touch any existing error mode flags by merging the existing error mode.
+ # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
+ def set_error_mode(mode):
+ prev_error_mode = SEM_INVALID_VALUE
+ try:
+ import ctypes
+ prev_error_mode = (
+ ctypes.windll.kernel32.SetErrorMode(mode)) #@UndefinedVariable
+ except ImportError:
+ pass
+ return prev_error_mode
+
+ error_mode = SEM_NOGPFAULTERRORBOX
+ prev_error_mode = set_error_mode(error_mode)
+ set_error_mode(error_mode | prev_error_mode)
+
+ try:
+ return super(WindowsCommand, self)._start_process(**kwargs)
+ finally:
+ if prev_error_mode != SEM_INVALID_VALUE:
+ set_error_mode(prev_error_mode)
+
+ def _get_popen_args(self):
+ return subprocess.list2cmdline(self._to_args_list())
+
+ def _kill_process(self, process):
+ if self.verbose:
+ print 'Attempting to kill process %d' % process.pid
+ sys.stdout.flush()
+ tk = subprocess.Popen(
+ 'taskkill /T /F /PID %d' % process.pid,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ stdout, stderr = tk.communicate()
+ if self.verbose:
+ print 'Taskkill results for %d' % process.pid
+ print stdout
+ print stderr
+ print 'Return code: %d' % tk.returncode
+ sys.stdout.flush()
+
+
+# Set the Command class to the OS-specific version.
+if utils.IsWindows():
+ Command = WindowsCommand
+else:
+ Command = PosixCommand
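
Stripped of the V8 plumbing, the timeout handling in BaseCommand.execute
above reduces to a threading.Timer that flags the run and kills the process;
a self-contained sketch assuming Posix kill semantics:

    import subprocess
    import threading

    def run_with_timeout(args, timeout):
        timed_out = [False]
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        def on_timeout():
            timed_out[0] = True
            proc.kill()  # WindowsCommand shells out to taskkill /T /F instead
        timer = threading.Timer(timeout, on_timeout)
        timer.start()
        stdout, stderr = proc.communicate()
        timer.cancel()
        return proc.returncode, timed_out[0], stdout, stderr
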
diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py
deleted file mode 100644
index 4afd450d2f..0000000000
--- a/deps/v8/tools/testrunner/local/commands.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import subprocess
-import sys
-from threading import Timer
-
-from ..local import utils
-from ..objects import output
-
-
-SEM_INVALID_VALUE = -1
-SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
-
-
-def Win32SetErrorMode(mode):
- prev_error_mode = SEM_INVALID_VALUE
- try:
- import ctypes
- prev_error_mode = \
- ctypes.windll.kernel32.SetErrorMode(mode) #@UndefinedVariable
- except ImportError:
- pass
- return prev_error_mode
-
-
-def RunProcess(verbose, timeout, args, additional_env, **rest):
- if verbose: print "#", " ".join(args)
- popen_args = args
- prev_error_mode = SEM_INVALID_VALUE
- if utils.IsWindows():
- popen_args = subprocess.list2cmdline(args)
- # Try to change the error mode to avoid dialogs on fatal errors. Don't
- # touch any existing error mode flags by merging the existing error mode.
- # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
- error_mode = SEM_NOGPFAULTERRORBOX
- prev_error_mode = Win32SetErrorMode(error_mode)
- Win32SetErrorMode(error_mode | prev_error_mode)
-
- env = os.environ.copy()
- env.update(additional_env)
- # GTest shard information is read by the V8 tests runner. Make sure it
- # doesn't leak into the execution of gtests we're wrapping. Those might
- # otherwise apply a second level of sharding and as a result skip tests.
- env.pop('GTEST_TOTAL_SHARDS', None)
- env.pop('GTEST_SHARD_INDEX', None)
-
- try:
- process = subprocess.Popen(
- args=popen_args,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=env,
- **rest
- )
- except Exception as e:
- sys.stderr.write("Error executing: %s\n" % popen_args)
- raise e
-
- if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
- Win32SetErrorMode(prev_error_mode)
-
- def kill_process(process, timeout_result):
- timeout_result[0] = True
- try:
- if utils.IsWindows():
- if verbose:
- print "Attempting to kill process %d" % process.pid
- sys.stdout.flush()
- tk = subprocess.Popen(
- 'taskkill /T /F /PID %d' % process.pid,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
- stdout, stderr = tk.communicate()
- if verbose:
- print "Taskkill results for %d" % process.pid
- print stdout
- print stderr
- print "Return code: %d" % tk.returncode
- sys.stdout.flush()
- else:
- if utils.GuessOS() == "macos":
- # TODO(machenbach): Temporary output for investigating hanging test
- # driver on mac.
- print "Attempting to kill process %d - cmd %s" % (process.pid, args)
- try:
- print subprocess.check_output(
- "ps -e | egrep 'd8|cctest|unittests'", shell=True)
- except Exception:
- pass
- sys.stdout.flush()
- process.kill()
- if utils.GuessOS() == "macos":
- # TODO(machenbach): Temporary output for investigating hanging test
- # driver on mac. This will probably not print much, since kill only
- # sends the signal.
- print "Return code after signalling the kill: %s" % process.returncode
- sys.stdout.flush()
-
- except OSError:
- sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
-
- # Pseudo object to communicate with timer thread.
- timeout_result = [False]
-
- timer = Timer(timeout, kill_process, [process, timeout_result])
- timer.start()
- stdout, stderr = process.communicate()
- timer.cancel()
-
- return output.Output(
- process.returncode,
- timeout_result[0],
- stdout.decode('utf-8', 'replace').encode('utf-8'),
- stderr.decode('utf-8', 'replace').encode('utf-8'),
- process.pid,
- )
-
-
-# TODO(machenbach): Instead of passing args around, we should introduce an
-# immutable Command class (that just represents the command with all flags and
-# is pretty-printable) and a member method for running such a command.
-def Execute(args, verbose=False, timeout=None, env=None):
- args = [ c for c in args if c != "" ]
- return RunProcess(verbose, timeout, args, env or {})
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index 8cc3556cae..d6d0725365 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -31,15 +31,14 @@ import os
import re
import shutil
import sys
-import time
+import traceback
-from pool import Pool
-from . import commands
+from . import command
from . import perfdata
from . import statusfile
-from . import testsuite
from . import utils
-from ..objects import output
+from . pool import Pool
+from ..objects import predictable
# Base dir of the v8 checkout.
@@ -48,76 +47,22 @@ BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
TEST_DIR = os.path.join(BASE_DIR, "test")
-class Instructions(object):
- def __init__(self, command, test_id, timeout, verbose, env):
- self.command = command
- self.id = test_id
- self.timeout = timeout
- self.verbose = verbose
- self.env = env
-
-
# Structure that keeps global information per worker process.
ProcessContext = collections.namedtuple(
- "process_context", ["suites", "context"])
+ 'process_context', ['sancov_dir'])
-def MakeProcessContext(context, suite_names):
- """Generate a process-local context.
+TestJobResult = collections.namedtuple(
+ 'TestJobResult', ['id', 'outproc_result'])
- This reloads all suites per process and stores the global context.
+def MakeProcessContext(sancov_dir):
+ return ProcessContext(sancov_dir)
- Args:
- context: The global context from the test runner.
- suite_names (list of str): Suite names as loaded by the parent process.
- Load the same suites in each subprocess.
- """
- suites = {}
- for root in suite_names:
- # Don't reinitialize global state as this is concurrently called from
- # different processes.
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(TEST_DIR, root), global_init=False)
- if suite:
- suites[suite.name] = suite
- return ProcessContext(suites, context)
-
-
-def GetCommand(test, context):
- d8testflag = []
- shell = test.suite.GetShellForTestCase(test)
- if shell == "d8":
- d8testflag = ["--test"]
- if utils.IsWindows():
- shell += ".exe"
- if context.random_seed:
- d8testflag += ["--random-seed=%s" % context.random_seed]
- files, flags, env = test.suite.GetParametersForTestCase(test, context)
- cmd = (
- context.command_prefix +
- [os.path.abspath(os.path.join(context.shell_dir, shell))] +
- d8testflag +
- files +
- context.extra_flags +
- # Flags from test cases can overwrite extra cmd-line flags.
- flags
- )
- return cmd, env
-
-
-def _GetInstructions(test, context):
- command, env = GetCommand(test, context)
- timeout = context.timeout
- if ("--stress-opt" in test.flags or
- "--stress-opt" in context.mode_flags or
- "--stress-opt" in context.extra_flags):
- timeout *= 4
- if "--noenable-vfp3" in context.extra_flags:
- timeout *= 2
-
- # TODO(majeski): make it slow outcome dependent.
- timeout *= 2
- return Instructions(command, test.id, timeout, context.verbose, env)
+
+# Global function for multiprocessing, because pickling a static method doesn't
+# work on Windows.
+def run_job(job, process_context):
+ return job.run(process_context)
class Job(object):
@@ -126,31 +71,18 @@ class Job(object):
All contained fields will be pickled/unpickled.
"""
- def Run(self, process_context):
- """Executes the job.
-
- Args:
- process_context: Process-local information that is initialized by the
- executing worker.
- """
+ def run(self, process_context):
raise NotImplementedError()
-def SetupProblem(exception, test):
- stderr = ">>> EXCEPTION: %s\n" % exception
- match = re.match(r"^.*No such file or directory: '(.*)'$", str(exception))
- if match:
- # Extra debuging information when files are claimed missing.
- f = match.group(1)
- stderr += ">>> File %s exists? -> %s\n" % (f, os.path.exists(f))
- return test.id, output.Output(1, False, "", stderr, None), 0
-
-
class TestJob(Job):
- def __init__(self, test):
- self.test = test
+ def __init__(self, test_id, cmd, outproc, run_num):
+ self.test_id = test_id
+ self.cmd = cmd
+ self.outproc = outproc
+ self.run_num = run_num
- def _rename_coverage_data(self, output, context):
+ def _rename_coverage_data(self, out, sancov_dir):
"""Rename coverage data.
Rename files with PIDs to files with unique test IDs, because the number
@@ -159,64 +91,53 @@ class TestJob(Job):
42 is the test ID and 1 is the attempt (the same test might be rerun on
failures).
"""
- if context.sancov_dir and output.pid is not None:
- shell = self.test.suite.GetShellForTestCase(self.test)
- sancov_file = os.path.join(
- context.sancov_dir, "%s.%d.sancov" % (shell, output.pid))
+ if sancov_dir and out.pid is not None:
+      # Sancov doesn't work on Windows, so basename suffices for the shell name.
+ shell = os.path.basename(self.cmd.shell)
+ sancov_file = os.path.join(sancov_dir, "%s.%d.sancov" % (shell, out.pid))
# Some tests are expected to fail and don't produce coverage data.
if os.path.exists(sancov_file):
parts = sancov_file.split(".")
new_sancov_file = ".".join(
parts[:-2] +
- ["test", str(self.test.id), str(self.test.run)] +
+ ["test", str(self.test_id), str(self.run_num)] +
parts[-1:]
)
assert not os.path.exists(new_sancov_file)
os.rename(sancov_file, new_sancov_file)
- def Run(self, process_context):
- try:
- # Retrieve a new suite object on the worker-process side. The original
- # suite object isn't pickled.
- self.test.SetSuiteObject(process_context.suites)
- instr = _GetInstructions(self.test, process_context.context)
- except Exception, e:
- # TODO(majeski): Better exception reporting.
- return SetupProblem(e, self.test)
-
- start_time = time.time()
- output = commands.Execute(instr.command, instr.verbose, instr.timeout,
- instr.env)
- self._rename_coverage_data(output, process_context.context)
- return (instr.id, output, time.time() - start_time)
-
-
-def RunTest(job, process_context):
- return job.Run(process_context)
+ def run(self, context):
+ output = self.cmd.execute()
+ self._rename_coverage_data(output, context.sancov_dir)
+ return TestJobResult(self.test_id, self.outproc.process(output))
class Runner(object):
- def __init__(self, suites, progress_indicator, context):
+ def __init__(self, suites, progress_indicator, context, outproc_factory=None):
self.datapath = os.path.join("out", "testrunner_data")
self.perf_data_manager = perfdata.GetPerfDataManager(
context, self.datapath)
self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
self.perf_failures = False
self.printed_allocations = False
+ self.outproc_factory = outproc_factory or (lambda test: test.output_proc)
self.tests = [t for s in suites for t in s.tests]
+
+ # TODO(majeski): Pass dynamically instead of keeping them in the runner.
+ # Maybe some observer?
+ self.outputs = {t: None for t in self.tests}
+
self.suite_names = [s.name for s in suites]
# Always pre-sort by status file, slowest tests first.
- slow_key = lambda t: statusfile.IsSlow(t.suite.GetStatusFileOutcomes(t))
- self.tests.sort(key=slow_key, reverse=True)
+ self.tests.sort(key=lambda t: t.is_slow, reverse=True)
- # Sort by stored duration of not opted out.
+ # Sort by stored duration if not opted out.
if not context.no_sorting:
- for t in self.tests:
- t.duration = self.perfdata.FetchPerfData(t) or 1.0
- self.tests.sort(key=lambda t: t.duration, reverse=True)
+ self.tests.sort(key=lambda t: self.perfdata.FetchPerfData(t) or 1.0,
+ reverse=True)
self._CommonInit(suites, progress_indicator, context)
@@ -242,7 +163,7 @@ class Runner(object):
print("PerfData exception: %s" % e)
self.perf_failures = True
- def _MaybeRerun(self, pool, test):
+ def _MaybeRerun(self, pool, test, result):
if test.run <= self.context.rerun_failures_count:
# Possibly rerun this test if its run count is below the maximum per
# test. <= as the flag controls reruns not including the first run.
@@ -254,25 +175,25 @@ class Runner(object):
# Don't rerun this if the overall number of rerun tests has been
# reached.
return
- if test.run >= 2 and test.duration > self.context.timeout / 20.0:
+ if (test.run >= 2 and
+ result.output.duration > self.context.timeout / 20.0):
# Rerun slow tests at most once.
return
# Rerun this test.
- test.duration = None
- test.output = None
test.run += 1
- pool.add([TestJob(test)])
+ pool.add([
+ TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
+ ])
self.remaining += 1
self.total += 1
- def _ProcessTestNormal(self, test, result, pool):
- test.output = result[1]
- test.duration = result[2]
- has_unexpected_output = test.suite.HasUnexpectedOutput(test)
+ def _ProcessTest(self, test, result, pool):
+ self.outputs[test] = result.output
+ has_unexpected_output = result.has_unexpected_output
if has_unexpected_output:
self.failed.append(test)
- if test.output.HasCrashed():
+ if result.output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
@@ -280,57 +201,15 @@ class Runner(object):
# For the indicator, everything that happens after the first run is treated
# as unexpected even if it flakily passes in order to include it in the
# output.
- self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
+ self.indicator.HasRun(test, result.output,
+ has_unexpected_output or test.run > 1)
if has_unexpected_output:
# Rerun test failures after the indicator has processed the results.
self._VerbosePrint("Attempting to rerun test after failure.")
- self._MaybeRerun(pool, test)
+ self._MaybeRerun(pool, test, result)
# Update the perf database if the test succeeded.
return not has_unexpected_output
- def _ProcessTestPredictable(self, test, result, pool):
- def HasDifferentAllocations(output1, output2):
- def AllocationStr(stdout):
- for line in reversed((stdout or "").splitlines()):
- if line.startswith("### Allocations = "):
- self.printed_allocations = True
- return line
- return ""
- return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))
-
- # Always pass the test duration for the database update.
- test.duration = result[2]
- if test.run == 1 and result[1].HasTimedOut():
- # If we get a timeout in the first run, we are already in an
- # unpredictable state. Just report it as a failure and don't rerun.
- test.output = result[1]
- self.remaining -= 1
- self.failed.append(test)
- self.indicator.HasRun(test, True)
- if test.run > 1 and HasDifferentAllocations(test.output, result[1]):
- # From the second run on, check for different allocations. If a
- # difference is found, call the indicator twice to report both tests.
- # All runs of each test are counted as one for the statistic.
- self.remaining -= 1
- self.failed.append(test)
- self.indicator.HasRun(test, True)
- test.output = result[1]
- self.indicator.HasRun(test, True)
- elif test.run >= 3:
- # No difference on the third run -> report a success.
- self.remaining -= 1
- self.succeeded += 1
- test.output = result[1]
- self.indicator.HasRun(test, False)
- else:
- # No difference yet and less than three runs -> add another run and
- # remember the output for comparison.
- test.run += 1
- test.output = result[1]
- pool.add([TestJob(test)])
- # Always update the perf database.
- return True
-
def Run(self, jobs):
self.indicator.Starting()
self._RunInternal(jobs)
@@ -350,50 +229,54 @@ class Runner(object):
assert test.id >= 0
test_map[test.id] = test
try:
- yield [TestJob(test)]
+ yield [
+ TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
+ ]
except Exception, e:
# If this failed, save the exception and re-raise it later (after
# all other tests have had a chance to run).
- queued_exception[0] = e
+ queued_exception[0] = e, traceback.format_exc()
continue
try:
it = pool.imap_unordered(
- fn=RunTest,
+ fn=run_job,
gen=gen_tests(),
process_context_fn=MakeProcessContext,
- process_context_args=[self.context, self.suite_names],
+ process_context_args=[self.context.sancov_dir],
)
for result in it:
if result.heartbeat:
self.indicator.Heartbeat()
continue
- test = test_map[result.value[0]]
- if self.context.predictable:
- update_perf = self._ProcessTestPredictable(test, result.value, pool)
- else:
- update_perf = self._ProcessTestNormal(test, result.value, pool)
+
+ job_result = result.value
+ test_id = job_result.id
+ outproc_result = job_result.outproc_result
+
+ test = test_map[test_id]
+ update_perf = self._ProcessTest(test, outproc_result, pool)
if update_perf:
- self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
+ self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(
+ test, outproc_result.output.duration))
+ except KeyboardInterrupt:
+ raise
+ except:
+ traceback.print_exc()
+ raise
finally:
self._VerbosePrint("Closing process pool.")
pool.terminate()
self._VerbosePrint("Closing database connection.")
- self._RunPerfSafe(lambda: self.perf_data_manager.close())
+ self._RunPerfSafe(self.perf_data_manager.close)
if self.perf_failures:
# Nuke perf data in case of failures. This might not work on windows as
# some files might still be open.
print "Deleting perf test data due to db corruption."
shutil.rmtree(self.datapath)
if queued_exception[0]:
- raise queued_exception[0]
-
- # Make sure that any allocations were printed in predictable mode (if we
- # ran any tests).
- assert (
- not self.total or
- not self.context.predictable or
- self.printed_allocations
- )
+ e, stacktrace = queued_exception[0]
+ print stacktrace
+ raise e
def _VerbosePrint(self, text):
if self.context.verbose:
@@ -403,6 +286,8 @@ class Runner(object):
class BreakNowException(Exception):
def __init__(self, value):
+ super(BreakNowException, self).__init__()
self.value = value
+
def __str__(self):
return repr(self.value)
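
run_job above is a module-level function rather than a method because
multiprocessing pickles the callable it dispatches, and under the Python 2
these tools target a static or bound method cannot be pickled. A minimal
demonstration (the names are illustrative):

    import pickle

    def free_function(job):
        return job

    class Holder(object):
      @staticmethod
      def static_method(job):
        return job

    pickle.dumps(free_function)           # fine: found via its module name
    # pickle.dumps(Holder.static_method)  # PicklingError on Python 2
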
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
index d2748febd9..52f31ec422 100644
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ b/deps/v8/tools/testrunner/local/junit_output.py
@@ -34,9 +34,10 @@ class JUnitTestOutput:
self.root = xml.Element("testsuite")
self.root.attrib["name"] = test_suite_name
- def HasRunTest(self, test_name, test_duration, test_failure):
+ def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = " ".join(test_name)
+ testCaseElement.attrib["name"] = test_name
+ testCaseElement.attrib["cmd"] = test_cmd
testCaseElement.attrib["time"] = str(round(test_duration, 3))
if len(test_failure):
failureElement = xml.Element("failure")
@@ -44,5 +45,5 @@ class JUnitTestOutput:
testCaseElement.append(failureElement)
self.root.append(testCaseElement)
- def FinishAndWrite(self, file):
- xml.ElementTree(self.root).write(file, "UTF-8")
+ def FinishAndWrite(self, f):
+ xml.ElementTree(self.root).write(f, "UTF-8")
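
With the cmd attribute added above, each test case serializes to an element
built roughly like this (sample values are invented):

    import xml.etree.ElementTree as xml

    e = xml.Element('testcase')
    e.attrib['name'] = 'mjsunit/foo'
    e.attrib['cmd'] = 'out/x64.release/d8 --test mjsunit/foo.js'
    e.attrib['time'] = '1.234'
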
diff --git a/deps/v8/tools/testrunner/local/perfdata.py b/deps/v8/tools/testrunner/local/perfdata.py
index 29ebff773a..4cb618b0be 100644
--- a/deps/v8/tools/testrunner/local/perfdata.py
+++ b/deps/v8/tools/testrunner/local/perfdata.py
@@ -62,22 +62,17 @@ class PerfDataStore(object):
self.database.close()
self.closed = True
- def GetKey(self, test):
- """Computes the key used to access data for the given testcase."""
- flags = "".join(test.flags)
- return str("%s.%s.%s" % (test.suitename(), test.path, flags))
-
def FetchPerfData(self, test):
"""Returns the observed duration for |test| as read from the store."""
- key = self.GetKey(test)
+ key = test.get_id()
if key in self.database:
return self.database[key].avg
return None
- def UpdatePerfData(self, test):
- """Updates the persisted value in the store with test.duration."""
- testkey = self.GetKey(test)
- self.RawUpdatePerfData(testkey, test.duration)
+ def UpdatePerfData(self, test, duration):
+ """Updates the persisted value in the store with duration."""
+ testkey = test.get_id()
+ self.RawUpdatePerfData(testkey, duration)
def RawUpdatePerfData(self, testkey, duration):
with self.lock:
@@ -121,7 +116,7 @@ class PerfDataManager(object):
class NullPerfDataStore(object):
- def UpdatePerfData(self, test):
+ def UpdatePerfData(self, test, duration):
pass
def FetchPerfData(self, test):
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index 99996ee3ce..9199b62d8a 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -8,6 +8,21 @@ from multiprocessing import Event, Process, Queue
import traceback
+def setup_testing():
+ """For testing only: Use threading under the hood instead of multiprocessing
+ to make coverage work.
+ """
+ global Queue
+ global Event
+ global Process
+ del Queue
+ del Event
+ del Process
+ from Queue import Queue
+ from threading import Event
+ from threading import Thread as Process
+
+
class NormalResult():
def __init__(self, result):
self.result = result
@@ -120,8 +135,8 @@ class Pool():
self.done,
process_context_fn,
process_context_args))
- self.processes.append(p)
p.start()
+ self.processes.append(p)
self.advance(gen)
while self.count > 0:
@@ -145,6 +160,11 @@ class Pool():
else:
yield MaybeResult.create_result(result.result)
self.advance(gen)
+ except KeyboardInterrupt:
+ raise
+ except Exception as e:
+ traceback.print_exc()
+ print(">>> EXCEPTION: %s" % e)
finally:
self.terminate()
if internal_error:
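
setup_testing above exists because coverage tooling cannot trace forked
worker processes; rebinding Queue, Event, and Process to their threading
equivalents keeps the pool's logic intact while making the job code visible
to coverage. Usage sketch:

    from testrunner.local import pool

    pool.setup_testing()  # must run before any Pool is constructed
    # From here on, pool "workers" are threads, so a coverage run can
    # observe the code executed by each job directly.
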
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index e57a6e36c9..f6ebddf2e5 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -32,12 +32,9 @@ import os
import sys
import time
-from . import execution
from . import junit_output
from . import statusfile
-
-
-ABS_PATH_PREFIX = os.getcwd() + os.sep
+from ..testproc import progress as progress_proc
class ProgressIndicator(object):
@@ -54,33 +51,26 @@ class ProgressIndicator(object):
def Done(self):
pass
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
pass
def Heartbeat(self):
pass
def PrintFailureHeader(self, test):
- if test.suite.IsNegativeTest(test):
+ if test.output_proc.negative:
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
- 'label': test.GetLabel(),
- 'negative': negative_marker
+ 'label': test,
+ 'negative': negative_marker,
}
- def _EscapeCommand(self, test):
- command, _ = execution.GetCommand(test, self.runner.context)
- parts = []
- for part in command:
- if ' ' in part:
- # Escape spaces. We may need to escape more characters for this
- # to work properly.
- parts.append('"%s"' % part)
- else:
- parts.append(part)
- return " ".join(parts)
+ def ToProgressIndicatorProc(self):
+ print ('Warning: %s is not available as a processor' %
+ self.__class__.__name__)
+ return None
class IndicatorNotifier(object):
@@ -91,6 +81,9 @@ class IndicatorNotifier(object):
def Register(self, indicator):
self.indicators.append(indicator)
+ def ToProgressIndicatorProcs(self):
+ return [i.ToProgressIndicatorProc() for i in self.indicators]
+
# Forge all generic event-dispatching methods in IndicatorNotifier, which are
# part of the ProgressIndicator interface.
@@ -116,18 +109,19 @@ class SimpleProgressIndicator(ProgressIndicator):
def Done(self):
print
for failed in self.runner.failed:
+ output = self.runner.outputs[failed]
self.PrintFailureHeader(failed)
- if failed.output.stderr:
+ if output.stderr:
print "--- stderr ---"
- print failed.output.stderr.strip()
- if failed.output.stdout:
+ print output.stderr.strip()
+ if output.stdout:
print "--- stdout ---"
- print failed.output.stdout.strip()
- print "Command: %s" % self._EscapeCommand(failed)
- if failed.output.HasCrashed():
- print "exit code: %d" % failed.output.exit_code
+ print output.stdout.strip()
+ print "Command: %s" % failed.cmd.to_string()
+ if output.HasCrashed():
+ print "exit code: %d" % output.exit_code
print "--- CRASHED ---"
- if failed.output.HasTimedOut():
+ if output.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.runner.failed) == 0:
print "==="
@@ -144,33 +138,36 @@ class SimpleProgressIndicator(ProgressIndicator):
class VerboseProgressIndicator(SimpleProgressIndicator):
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
if has_unexpected_output:
- if test.output.HasCrashed():
+ if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
- print 'Done running %s: %s' % (test.GetLabel(), outcome)
+ print 'Done running %s: %s' % (test, outcome)
sys.stdout.flush()
def Heartbeat(self):
print 'Still working...'
sys.stdout.flush()
+ def ToProgressIndicatorProc(self):
+ return progress_proc.VerboseProgressIndicator()
+
class DotsProgressIndicator(SimpleProgressIndicator):
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
total = self.runner.succeeded + len(self.runner.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if has_unexpected_output:
- if test.output.HasCrashed():
+ if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
- elif test.output.HasTimedOut():
+ elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
@@ -180,6 +177,9 @@ class DotsProgressIndicator(SimpleProgressIndicator):
sys.stdout.write('.')
sys.stdout.flush()
+ def ToProgressIndicatorProc(self):
+ return progress_proc.DotsProgressIndicator()
+
class CompactProgressIndicator(ProgressIndicator):
"""Abstract base class for {Color,Monochrome}ProgressIndicator"""
@@ -194,22 +194,22 @@ class CompactProgressIndicator(ProgressIndicator):
self.PrintProgress('Done')
print "" # Line break.
- def HasRun(self, test, has_unexpected_output):
- self.PrintProgress(test.GetLabel())
+ def HasRun(self, test, output, has_unexpected_output):
+ self.PrintProgress(str(test))
if has_unexpected_output:
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(test)
- stdout = test.output.stdout.strip()
+ stdout = output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
- stderr = test.output.stderr.strip()
+ stderr = output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
- print "Command: %s" % self._EscapeCommand(test)
- if test.output.HasCrashed():
- print "exit code: %d" % test.output.exit_code
+ print "Command: %s" % test.cmd.to_string()
+ if output.HasCrashed():
+ print "exit code: %d" % output.exit_code
print "--- CRASHED ---"
- if test.output.HasTimedOut():
+ if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, string, length):
@@ -254,6 +254,9 @@ class ColorProgressIndicator(CompactProgressIndicator):
def ClearLine(self, last_line_length):
print "\033[1K\r",
+ def ToProgressIndicatorProc(self):
+ return progress_proc.ColorProgressIndicator()
+
class MonochromeProgressIndicator(CompactProgressIndicator):
@@ -269,10 +272,15 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
+ def ToProgressIndicatorProc(self):
+ return progress_proc.MonochromeProgressIndicator()
-class JUnitTestProgressIndicator(ProgressIndicator):
+class JUnitTestProgressIndicator(ProgressIndicator):
def __init__(self, junitout, junittestsuite):
+ super(JUnitTestProgressIndicator, self).__init__()
+ self.junitout = junitout
+    self.junittestsuite = junittestsuite
self.outputter = junit_output.JUnitTestOutput(junittestsuite)
if junitout:
self.outfile = open(junitout, "w")
@@ -284,29 +292,37 @@ class JUnitTestProgressIndicator(ProgressIndicator):
if self.outfile != sys.stdout:
self.outfile.close()
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
fail_text = ""
if has_unexpected_output:
- stdout = test.output.stdout.strip()
+ stdout = output.stdout.strip()
if len(stdout):
fail_text += "stdout:\n%s\n" % stdout
- stderr = test.output.stderr.strip()
+ stderr = output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % self._EscapeCommand(test)
- if test.output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
- if test.output.HasTimedOut():
+ fail_text += "Command: %s" % test.cmd.to_string()
+ if output.HasCrashed():
+ fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
+ if output.HasTimedOut():
fail_text += "--- TIMEOUT ---"
self.outputter.HasRunTest(
- [test.GetLabel()] + self.runner.context.mode_flags + test.flags,
- test.duration,
- fail_text)
+ test_name=str(test),
+ test_cmd=test.cmd.to_string(relative=True),
+ test_duration=output.duration,
+ test_failure=fail_text)
+
+ def ToProgressIndicatorProc(self):
+ if self.outfile != sys.stdout:
+ self.outfile.close()
+ return progress_proc.JUnitTestProgressIndicator(self.junitout,
+ self.junittestsuite)
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results, arch, mode, random_seed):
+ super(JsonTestProgressIndicator, self).__init__()
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
@@ -314,6 +330,10 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.results = []
self.tests = []
+ def ToProgressIndicatorProc(self):
+ return progress_proc.JsonTestProgressIndicator(
+ self.json_test_results, self.arch, self.mode, self.random_seed)
+
def Done(self):
complete_results = []
if os.path.exists(self.json_test_results):
@@ -325,20 +345,19 @@ class JsonTestProgressIndicator(ProgressIndicator):
if self.tests:
# Get duration mean.
duration_mean = (
- sum(t.duration for t in self.tests) / float(len(self.tests)))
+ sum(duration for (_, duration) in self.tests) /
+ float(len(self.tests)))
# Sort tests by duration.
- timed_tests = [t for t in self.tests if t.duration is not None]
- timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
+ self.tests.sort(key=lambda (_, duration): duration, reverse=True)
slowest_tests = [
{
- "name": test.GetLabel(),
- "flags": test.flags,
- "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
- "duration": test.duration,
- "marked_slow": statusfile.IsSlow(
- test.suite.GetStatusFileOutcomes(test)),
- } for test in timed_tests[:20]
+ "name": str(test),
+ "flags": test.cmd.args,
+ "command": test.cmd.to_string(relative=True),
+ "duration": duration,
+ "marked_slow": test.is_slow,
+ } for (test, duration) in self.tests[:20]
]
complete_results.append({
@@ -353,30 +372,30 @@ class JsonTestProgressIndicator(ProgressIndicator):
with open(self.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
# Buffer all tests for sorting the durations in the end.
- self.tests.append(test)
+ self.tests.append((test, output.duration))
if not has_unexpected_output:
      # Omit tests that ran as expected. Passing tests from reruns after
      # failures still have unexpected_output set and are reported here as
      # well.
return
self.results.append({
- "name": test.GetLabel(),
- "flags": test.flags,
- "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
+ "name": str(test),
+ "flags": test.cmd.args,
+ "command": test.cmd.to_string(relative=True),
"run": test.run,
- "stdout": test.output.stdout,
- "stderr": test.output.stderr,
- "exit_code": test.output.exit_code,
- "result": test.suite.GetOutcome(test),
- "expected": test.suite.GetExpectedOutcomes(test),
- "duration": test.duration,
+ "stdout": output.stdout,
+ "stderr": output.stderr,
+ "exit_code": output.exit_code,
+ "result": test.output_proc.get_outcome(output),
+ "expected": test.expected_outcomes,
+ "duration": output.duration,
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
"random_seed": self.random_seed,
- "target_name": test.suite.GetShellForTestCase(test),
+ "target_name": test.get_shell(),
"variant": test.variant,
})
@@ -384,6 +403,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
class FlakinessTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results):
+ super(FlakinessTestProgressIndicator, self).__init__()
self.json_test_results = json_test_results
self.results = {}
self.summary = {
@@ -405,28 +425,23 @@ class FlakinessTestProgressIndicator(ProgressIndicator):
"version": 3,
}, f)
- def HasRun(self, test, has_unexpected_output):
- key = "/".join(
- sorted(flag.lstrip("-")
- for flag in self.runner.context.extra_flags + test.flags) +
- ["test", test.GetLabel()],
- )
- outcome = test.suite.GetOutcome(test)
+ def HasRun(self, test, output, has_unexpected_output):
+ key = test.get_id()
+ outcome = test.output_proc.get_outcome(output)
assert outcome in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
if test.run == 1:
# First run of this test.
- expected_outcomes = test.suite.GetExpectedOutcomes(test)
self.results[key] = {
"actual": outcome,
- "expected": " ".join(expected_outcomes),
- "times": [test.duration],
+ "expected": " ".join(test.expected_outcomes),
+ "times": [output.duration],
}
self.summary[outcome] = self.summary[outcome] + 1
else:
# This is a rerun and a previous result exists.
result = self.results[key]
result["actual"] = "%s %s" % (result["actual"], outcome)
- result["times"].append(test.duration)
+ result["times"].append(output.duration)
PROGRESS_INDICATORS = {
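The ToProgressIndicatorProc() methods added throughout this file let each
legacy indicator hand itself over to a counterpart from the new
progress_proc module when the testproc pipeline is in use. A minimal sketch
of the handoff, with hypothetical stand-in classes rather than the real
indicators:

class ProcIndicator(object):
  """Stand-in for a progress_proc indicator in the new pipeline."""
  def __init__(self, out):
    self._out = out

class LegacyIndicator(object):
  """Stand-in for a legacy indicator that knows its own constructor args."""
  def __init__(self, out):
    self.out = out

  def ToProgressIndicatorProc(self):
    # Recreate the equivalent proc-based indicator from the same state.
    return ProcIndicator(self.out)

indicator = LegacyIndicator('stdout')
indicator = indicator.ToProgressIndicatorProc()  # when the new pipeline is on
print(type(indicator).__name__)  # ProcIndicator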
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 7caf0711ca..988750d6b4 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -44,14 +44,13 @@ FAIL_SLOPPY = "FAIL_SLOPPY"
# Modifiers
SKIP = "SKIP"
SLOW = "SLOW"
-FAST_VARIANTS = "FAST_VARIANTS"
NO_VARIANTS = "NO_VARIANTS"
ALWAYS = "ALWAYS"
KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, FAST_VARIANTS, NO_VARIANTS,
- FAIL_SLOPPY, ALWAYS]:
+for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, NO_VARIANTS, FAIL_SLOPPY,
+ ALWAYS]:
KEYWORDS[key] = key
# Support arches, modes to be written as keywords instead of strings.
@@ -67,31 +66,73 @@ for var in ["debug", "release", "big", "little",
for var in ALL_VARIANTS:
VARIABLES[var] = var
+class StatusFile(object):
+ def __init__(self, path, variables):
+ """
+ _rules: {variant: {test name: [rule]}}
+ _prefix_rules: {variant: {test name prefix: [rule]}}
+ """
+ with open(path) as f:
+ self._rules, self._prefix_rules = ReadStatusFile(f.read(), variables)
-def DoSkip(outcomes):
- return SKIP in outcomes
+ def get_outcomes(self, testname, variant=None):
+ """Merges variant dependent and independent rules."""
+ outcomes = frozenset()
+ for key in set([variant or '', '']):
+ rules = self._rules.get(key, {})
+ prefix_rules = self._prefix_rules.get(key, {})
-def IsSlow(outcomes):
- return SLOW in outcomes
+ if testname in rules:
+ outcomes |= rules[testname]
+ for prefix in prefix_rules:
+ if testname.startswith(prefix):
+ outcomes |= prefix_rules[prefix]
-def OnlyStandardVariant(outcomes):
- return NO_VARIANTS in outcomes
+ return outcomes
+ def warn_unused_rules(self, tests, check_variant_rules=False):
+ """Finds and prints unused rules in status file.
-def OnlyFastVariants(outcomes):
- return FAST_VARIANTS in outcomes
+ Rule X is unused when it doesn't apply to any tests, which can also mean
+ that all matching tests were skipped by another rule before evaluating X.
+ Args:
+ tests: list of pairs (testname, variant)
+      check_variant_rules: if set, variant-dependent rules are also checked
+ """
-def IsPassOrFail(outcomes):
- return (PASS in outcomes and
- FAIL in outcomes and
- CRASH not in outcomes)
-
-
-def IsFailOk(outcomes):
- return FAIL_OK in outcomes
+ if check_variant_rules:
+ variants = list(ALL_VARIANTS)
+ else:
+ variants = ['']
+ used_rules = set()
+
+ for testname, variant in tests:
+ variant = variant or ''
+
+ if testname in self._rules.get(variant, {}):
+ used_rules.add((testname, variant))
+ if SKIP in self._rules[variant][testname]:
+ continue
+
+ for prefix in self._prefix_rules.get(variant, {}):
+ if testname.startswith(prefix):
+ used_rules.add((prefix, variant))
+ if SKIP in self._prefix_rules[variant][prefix]:
+ break
+
+ for variant in variants:
+ for rule, value in (
+ list(self._rules.get(variant, {}).iteritems()) +
+ list(self._prefix_rules.get(variant, {}).iteritems())):
+ if (rule, variant) not in used_rules:
+ if variant == '':
+ variant_desc = 'variant independent'
+ else:
+ variant_desc = 'variant: %s' % variant
+ print 'Unused rule: %s -> %s (%s)' % (rule, value, variant_desc)
def _JoinsPassAndFail(outcomes1, outcomes2):
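The new StatusFile.get_outcomes() unions exact-name rules and prefix rules
for both the requested variant and the variant-independent key ''. A
standalone sketch of that merge, using hypothetical rule tables in the same
shape as _rules and _prefix_rules:

rules = {
    '': {'foo/bar': {'PASS', 'SLOW'}},     # variant independent
    'stress': {'foo/bar': {'FAIL_OK'}},    # variant dependent
}
prefix_rules = {
    '': {'foo/': {'SLOW'}},
}

def get_outcomes(testname, variant=None):
  outcomes = frozenset()
  for key in set([variant or '', '']):
    for name, outcome_set in rules.get(key, {}).items():
      if name == testname:
        outcomes |= outcome_set
    for prefix, outcome_set in prefix_rules.get(key, {}).items():
      if testname.startswith(prefix):
        outcomes |= outcome_set
  return outcomes

print(sorted(get_outcomes('foo/bar', 'stress')))
# ['FAIL_OK', 'PASS', 'SLOW']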
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 946e89a3fc..6a9e9831ce 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -30,55 +30,65 @@ import fnmatch
import imp
import os
-from . import commands
+from . import command
from . import statusfile
from . import utils
-from ..objects import testcase
-from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS, FAST_VARIANT_FLAGS
+from ..objects.testcase import TestCase
+from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
-FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])
-class VariantGenerator(object):
+class LegacyVariantsGenerator(object):
def __init__(self, suite, variants):
self.suite = suite
self.all_variants = ALL_VARIANTS & variants
- self.fast_variants = FAST_VARIANTS & variants
self.standard_variant = STANDARD_VARIANT & variants
+ def FilterVariantsByTest(self, test):
+ if test.only_standard_variant:
+ return self.standard_variant
+ return self.all_variants
+
+ def GetFlagSets(self, test, variant):
+ return ALL_VARIANT_FLAGS[variant]
+
+
+class StandardLegacyVariantsGenerator(LegacyVariantsGenerator):
def FilterVariantsByTest(self, testcase):
- result = self.all_variants
- outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
- if outcomes:
- if statusfile.OnlyStandardVariant(outcomes):
- return self.standard_variant
- if statusfile.OnlyFastVariants(outcomes):
- result = self.fast_variants
- return result
-
- def GetFlagSets(self, testcase, variant):
- outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
- if outcomes and statusfile.OnlyFastVariants(outcomes):
- return FAST_VARIANT_FLAGS[variant]
- else:
- return ALL_VARIANT_FLAGS[variant]
+ return self.standard_variant
-class TestSuite(object):
+class VariantsGenerator(object):
+ def __init__(self, variants):
+ self._all_variants = [v for v in variants if v in ALL_VARIANTS]
+ self._standard_variant = [v for v in variants if v in STANDARD_VARIANT]
+
+ def gen(self, test):
+ """Generator producing (variant, flags, procid suffix) tuples."""
+ flags_set = self._get_flags_set(test)
+ for n, variant in enumerate(self._get_variants(test)):
+ yield (variant, flags_set[variant][0], n)
+
+ def _get_flags_set(self, test):
+ return ALL_VARIANT_FLAGS
+ def _get_variants(self, test):
+ if test.only_standard_variant:
+ return self._standard_variant
+ return self._all_variants
+
+
+class TestSuite(object):
@staticmethod
- def LoadTestSuite(root, global_init=True):
+ def LoadTestSuite(root):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module(name + "_testcfg", f, pathname, description)
return module.GetSuite(name, root)
- except ImportError:
- # Use default if no testcfg is present.
- return GoogleTestSuite(name, root)
finally:
if f:
f.close()
@@ -88,69 +98,40 @@ class TestSuite(object):
self.name = name # string
self.root = root # string containing path
self.tests = None # list of TestCase objects
- self.rules = None # {variant: {test name: [rule]}}
- self.prefix_rules = None # {variant: {test name prefix: [rule]}}
- self.total_duration = None # float, assigned on demand
-
- self._outcomes_cache = dict()
-
- def suffix(self):
- return ".js"
+ self.statusfile = None
def status_file(self):
return "%s/%s.status" % (self.root, self.name)
- # Used in the status file and for stdout printing.
- def CommonTestName(self, testcase):
- if utils.IsWindows():
- return testcase.path.replace("\\", "/")
- else:
- return testcase.path
-
def ListTests(self, context):
raise NotImplementedError
- def _VariantGeneratorFactory(self):
+ def _LegacyVariantsGeneratorFactory(self):
"""The variant generator class to be used."""
- return VariantGenerator
+ return LegacyVariantsGenerator
- def CreateVariantGenerator(self, variants):
+ def CreateLegacyVariantsGenerator(self, variants):
"""Return a generator for the testing variants of this suite.
Args:
variants: List of variant names to be run as specified by the test
runner.
- Returns: An object of type VariantGenerator.
+ Returns: An object of type LegacyVariantsGenerator.
"""
- return self._VariantGeneratorFactory()(self, set(variants))
+ return self._LegacyVariantsGeneratorFactory()(self, set(variants))
- def PrepareSources(self):
- """Called once before multiprocessing for doing file-system operations.
+ def get_variants_gen(self, variants):
+ return self._variants_gen_class()(variants)
- This should not access the network. For network access use the method
- below.
- """
- pass
+ def _variants_gen_class(self):
+ return VariantsGenerator
def ReadStatusFile(self, variables):
- with open(self.status_file()) as f:
- self.rules, self.prefix_rules = (
- statusfile.ReadStatusFile(f.read(), variables))
+ self.statusfile = statusfile.StatusFile(self.status_file(), variables)
def ReadTestCases(self, context):
self.tests = self.ListTests(context)
- def GetStatusfileFlags(self, test):
- """Gets runtime flags from a status file.
-
- Every outcome that starts with "--" is a flag. Status file has to be loaded
- before using this function.
- """
- flags = []
- for outcome in self.GetStatusFileOutcomes(test):
- if outcome.startswith('--'):
- flags.append(outcome)
- return flags
def FilterTestCasesByStatus(self,
slow_tests_mode=None,
@@ -179,58 +160,16 @@ class TestSuite(object):
(mode == 'skip' and pass_fail))
def _compliant(test):
- outcomes = self.GetStatusFileOutcomes(test)
- if statusfile.DoSkip(outcomes):
+ if test.do_skip:
return False
- if _skip_slow(statusfile.IsSlow(outcomes), slow_tests_mode):
+ if _skip_slow(test.is_slow, slow_tests_mode):
return False
- if _skip_pass_fail(statusfile.IsPassOrFail(outcomes),
- pass_fail_tests_mode):
+ if _skip_pass_fail(test.is_pass_or_fail, pass_fail_tests_mode):
return False
return True
self.tests = filter(_compliant, self.tests)
- def WarnUnusedRules(self, check_variant_rules=False):
- """Finds and prints unused rules in status file.
-
- Rule X is unused when it doesn't apply to any tests, which can also mean
- that all matching tests were skipped by another rule before evaluating X.
-
- Status file has to be loaded before using this function.
- """
-
- if check_variant_rules:
- variants = list(ALL_VARIANTS)
- else:
- variants = ['']
- used_rules = set()
-
- for t in self.tests:
- testname = self.CommonTestName(t)
- variant = t.variant or ""
-
- if testname in self.rules.get(variant, {}):
- used_rules.add((testname, variant))
- if statusfile.DoSkip(self.rules[variant][testname]):
- continue
-
- for prefix in self.prefix_rules.get(variant, {}):
- if testname.startswith(prefix):
- used_rules.add((prefix, variant))
- if statusfile.DoSkip(self.prefix_rules[variant][prefix]):
- break
-
- for variant in variants:
- for rule, value in (list(self.rules.get(variant, {}).iteritems()) +
- list(self.prefix_rules.get(variant, {}).iteritems())):
- if (rule, variant) not in used_rules:
- if variant == '':
- variant_desc = 'variant independent'
- else:
- variant_desc = 'variant: %s' % variant
- print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
-
def FilterTestCasesByArgs(self, args):
"""Filter test cases based on command-line arguments.
@@ -256,167 +195,14 @@ class TestSuite(object):
break
self.tests = filtered
- def GetExpectedOutcomes(self, testcase):
- """Gets expected outcomes from status file.
+ def _create_test(self, path, **kwargs):
+ test = self._test_class()(self, path, self._path_to_name(path), **kwargs)
+ return test
- It differs from GetStatusFileOutcomes by selecting only outcomes that can
- be result of test execution.
- Status file has to be loaded before using this function.
- """
- outcomes = self.GetStatusFileOutcomes(testcase)
-
- expected = []
- if (statusfile.FAIL in outcomes or
- statusfile.FAIL_OK in outcomes):
- expected.append(statusfile.FAIL)
-
- if statusfile.CRASH in outcomes:
- expected.append(statusfile.CRASH)
-
- if statusfile.PASS in outcomes:
- expected.append(statusfile.PASS)
-
- return expected or [statusfile.PASS]
-
- def GetStatusFileOutcomes(self, testcase):
- """Gets outcomes from status file.
-
- Merges variant dependent and independent rules. Status file has to be loaded
- before using this function.
- """
- variant = testcase.variant or ''
- testname = self.CommonTestName(testcase)
- cache_key = '%s$%s' % (testname, variant)
-
- if cache_key not in self._outcomes_cache:
- # Load statusfile to get outcomes for the first time.
- assert(self.rules is not None)
- assert(self.prefix_rules is not None)
-
- outcomes = frozenset()
-
- for key in set([variant, '']):
- rules = self.rules.get(key, {})
- prefix_rules = self.prefix_rules.get(key, {})
-
- if testname in rules:
- outcomes |= rules[testname]
-
- for prefix in prefix_rules:
- if testname.startswith(prefix):
- outcomes |= prefix_rules[prefix]
-
- self._outcomes_cache[cache_key] = outcomes
-
- return self._outcomes_cache[cache_key]
-
- def GetShellForTestCase(self, testcase):
- """Returns shell to be executed for this test case."""
- return 'd8'
-
- def GetParametersForTestCase(self, testcase, context):
- """Returns a tuple of (files, flags, env) for this test case."""
+ def _test_class(self):
raise NotImplementedError
- def GetSourceForTest(self, testcase):
- return "(no source available)"
-
- def IsFailureOutput(self, testcase):
- return testcase.output.exit_code != 0
-
- def IsNegativeTest(self, testcase):
- return False
-
- def HasFailed(self, testcase):
- execution_failed = self.IsFailureOutput(testcase)
- if self.IsNegativeTest(testcase):
- return not execution_failed
- else:
- return execution_failed
-
- def GetOutcome(self, testcase):
- if testcase.output.HasCrashed():
- return statusfile.CRASH
- elif testcase.output.HasTimedOut():
- return statusfile.TIMEOUT
- elif self.HasFailed(testcase):
- return statusfile.FAIL
- else:
- return statusfile.PASS
-
- def HasUnexpectedOutput(self, testcase):
- return self.GetOutcome(testcase) not in self.GetExpectedOutcomes(testcase)
-
- def StripOutputForTransmit(self, testcase):
- if not self.HasUnexpectedOutput(testcase):
- testcase.output.stdout = ""
- testcase.output.stderr = ""
-
- def CalculateTotalDuration(self):
- self.total_duration = 0.0
- for t in self.tests:
- self.total_duration += t.duration
- return self.total_duration
-
-
-class StandardVariantGenerator(VariantGenerator):
- def FilterVariantsByTest(self, testcase):
- return self.standard_variant
-
-
-class GoogleTestSuite(TestSuite):
- def __init__(self, name, root):
- super(GoogleTestSuite, self).__init__(name, root)
-
- def ListTests(self, context):
- shell = os.path.abspath(
- os.path.join(context.shell_dir, self.GetShellForTestCase(None)))
+ def _path_to_name(self, path):
if utils.IsWindows():
- shell += ".exe"
-
- output = None
- for i in xrange(3): # Try 3 times in case of errors.
- cmd = (
- context.command_prefix +
- [shell, "--gtest_list_tests"] +
- context.extra_flags
- )
- output = commands.Execute(cmd)
- if output.exit_code == 0:
- break
- print "Test executable failed to list the tests (try %d).\n\nCmd:" % i
- print ' '.join(cmd)
- print "\nStdout:"
- print output.stdout
- print "\nStderr:"
- print output.stderr
- print "\nExit code: %d" % output.exit_code
- else:
- raise Exception("Test executable failed to list the tests.")
-
- tests = []
- test_case = ''
- for line in output.stdout.splitlines():
- test_desc = line.strip().split()[0]
- if test_desc.endswith('.'):
- test_case = test_desc
- elif test_case and test_desc:
- test = testcase.TestCase(self, test_case + test_desc)
- tests.append(test)
- tests.sort(key=lambda t: t.path)
- return tests
-
- def GetParametersForTestCase(self, testcase, context):
- flags = (
- testcase.flags +
- ["--gtest_filter=" + testcase.path] +
- ["--gtest_random_seed=%s" % context.random_seed] +
- ["--gtest_print_time=0"] +
- context.mode_flags)
- return [], flags, {}
-
- def _VariantGeneratorFactory(self):
- return StandardVariantGenerator
-
- def GetShellForTestCase(self, testcase):
- return self.name
+ return path.replace("\\", "/")
+ return path
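The new VariantsGenerator yields (variant, flags, procid suffix) tuples and,
unlike the legacy generator, always takes the first flag set per variant. A
minimal sketch with a trimmed flag table and a stub test exposing only the
attribute the generator reads:

ALL_VARIANT_FLAGS = {
    'default': [[]],
    'stress': [['--stress-opt', '--always-opt']],
}

class VariantsGenerator(object):
  def __init__(self, variants):
    self._all_variants = [v for v in variants if v in ALL_VARIANT_FLAGS]
    self._standard_variant = [v for v in variants if v == 'default']

  def gen(self, test):
    # Yields (variant, flags, procid suffix) like the real generator.
    for n, variant in enumerate(self._get_variants(test)):
      yield (variant, ALL_VARIANT_FLAGS[variant][0], n)

  def _get_variants(self, test):
    if test.only_standard_variant:
      return self._standard_variant
    return self._all_variants

class StubTest(object):
  only_standard_variant = False

for variant, flags, suffix in VariantsGenerator(['default', 'stress']).gen(StubTest()):
  print('%s %s %s' % (variant, flags, suffix))
# default [] 0
# stress ['--stress-opt', '--always-opt'] 1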
diff --git a/deps/v8/tools/testrunner/local/testsuite_unittest.py b/deps/v8/tools/testrunner/local/testsuite_unittest.py
index a8483b9fc0..efefe4c533 100755
--- a/deps/v8/tools/testrunner/local/testsuite_unittest.py
+++ b/deps/v8/tools/testrunner/local/testsuite_unittest.py
@@ -19,10 +19,6 @@ from testrunner.objects.testcase import TestCase
class TestSuiteTest(unittest.TestCase):
def test_filter_testcases_by_status_first_pass(self):
suite = TestSuite('foo', 'bar')
- suite.tests = [
- TestCase(suite, 'foo/bar'),
- TestCase(suite, 'baz/bar'),
- ]
suite.rules = {
'': {
'foo/bar': set(['PASS', 'SKIP']),
@@ -34,27 +30,22 @@ class TestSuiteTest(unittest.TestCase):
'baz/': set(['PASS', 'SLOW']),
},
}
+ suite.tests = [
+ TestCase(suite, 'foo/bar', 'foo/bar'),
+ TestCase(suite, 'baz/bar', 'baz/bar'),
+ ]
suite.FilterTestCasesByStatus()
self.assertEquals(
- [TestCase(suite, 'baz/bar')],
+ [TestCase(suite, 'baz/bar', 'baz/bar')],
suite.tests,
)
- outcomes = suite.GetStatusFileOutcomes(suite.tests[0])
+ outcomes = suite.GetStatusFileOutcomes(suite.tests[0].name,
+ suite.tests[0].variant)
self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), outcomes)
def test_filter_testcases_by_status_second_pass(self):
suite = TestSuite('foo', 'bar')
- test1 = TestCase(suite, 'foo/bar')
- test2 = TestCase(suite, 'baz/bar')
-
- suite.tests = [
- test1.CopyAddingFlags(variant='default', flags=[]),
- test1.CopyAddingFlags(variant='stress', flags=['-v']),
- test2.CopyAddingFlags(variant='default', flags=[]),
- test2.CopyAddingFlags(variant='stress', flags=['-v']),
- ]
-
suite.rules = {
'': {
'foo/bar': set(['PREV']),
@@ -78,30 +69,38 @@ class TestSuiteTest(unittest.TestCase):
'foo/': set(['PASS', 'SLOW']),
},
}
+
+ test1 = TestCase(suite, 'foo/bar', 'foo/bar')
+ test2 = TestCase(suite, 'baz/bar', 'baz/bar')
+ suite.tests = [
+ test1.create_variant(variant='default', flags=[]),
+ test1.create_variant(variant='stress', flags=['-v']),
+ test2.create_variant(variant='default', flags=[]),
+ test2.create_variant(variant='stress', flags=['-v']),
+ ]
+
suite.FilterTestCasesByStatus()
self.assertEquals(
[
- TestCase(suite, 'foo/bar', flags=['-v']),
- TestCase(suite, 'baz/bar'),
+ TestCase(suite, 'foo/bar', 'foo/bar').create_variant(None, ['-v']),
+ TestCase(suite, 'baz/bar', 'baz/bar'),
],
suite.tests,
)
self.assertEquals(
set(['PREV', 'PASS', 'SLOW']),
- suite.GetStatusFileOutcomes(suite.tests[0]),
+ suite.GetStatusFileOutcomes(suite.tests[0].name,
+ suite.tests[0].variant),
)
self.assertEquals(
set(['PREV', 'PASS', 'FAIL', 'SLOW']),
- suite.GetStatusFileOutcomes(suite.tests[1]),
+ suite.GetStatusFileOutcomes(suite.tests[1].name,
+ suite.tests[1].variant),
)
def test_fail_ok_outcome(self):
suite = TestSuite('foo', 'bar')
- suite.tests = [
- TestCase(suite, 'foo/bar'),
- TestCase(suite, 'baz/bar'),
- ]
suite.rules = {
'': {
'foo/bar': set(['FAIL_OK']),
@@ -109,10 +108,13 @@ class TestSuiteTest(unittest.TestCase):
},
}
suite.prefix_rules = {}
+ suite.tests = [
+ TestCase(suite, 'foo/bar', 'foo/bar'),
+ TestCase(suite, 'baz/bar', 'baz/bar'),
+ ]
for t in suite.tests:
- expected_outcomes = suite.GetExpectedOutcomes(t)
- self.assertEquals(['FAIL'], expected_outcomes)
+ self.assertEquals(['FAIL'], t.expected_outcomes)
if __name__ == '__main__':
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index 3e79e44afa..bf8c3d9f7e 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -26,10 +26,10 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import os
from os.path import exists
from os.path import isdir
from os.path import join
+import os
import platform
import re
import subprocess
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index c8c7ce64a8..f1e9ad301e 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -4,38 +4,26 @@
# Use this to run several variants of the tests.
ALL_VARIANT_FLAGS = {
+ "code_serializer": [["--cache=code"]],
"default": [[]],
"future": [["--future"]],
+ # Alias of exhaustive variants, but triggering new test framework features.
+ "infra_staging": [[]],
"liftoff": [["--liftoff"]],
- "stress": [["--stress-opt", "--always-opt"]],
- # TODO(6792): Write protected code has been temporary added to the below
- # variant until the feature has been enabled (or staged) by default.
- "stress_incremental_marking": [["--stress-incremental-marking", "--write-protect-code-memory"]],
- # No optimization means disable all optimizations. OptimizeFunctionOnNextCall
- # would not force optimization too. It turns into a Nop. Please see
- # https://chromium-review.googlesource.com/c/452620/ for more discussion.
- "nooptimization": [["--noopt"]],
- "stress_background_compile": [["--background-compile", "--stress-background-compile"]],
- "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
-}
-
-# FAST_VARIANTS implies no --always-opt.
-FAST_VARIANT_FLAGS = {
- "default": [[]],
- "future": [["--future"]],
- "liftoff": [["--liftoff"]],
- "stress": [["--stress-opt"]],
- # TODO(6792): Write protected code has been temporary added to the below
- # variant until the feature has been enabled (or staged) by default.
- "stress_incremental_marking": [["--stress-incremental-marking", "--write-protect-code-memory"]],
+ "minor_mc": [["--minor-mc"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
"nooptimization": [["--noopt"]],
+ "slow_path": [["--force-slow-path"]],
+ "stress": [["--stress-opt", "--always-opt"]],
"stress_background_compile": [["--background-compile", "--stress-background-compile"]],
+ "stress_incremental_marking": [["--stress-incremental-marking"]],
+ # Trigger stress sampling allocation profiler with sample interval = 2^14
+ "stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
+ "trusted": [["--no-untrusted-code-mitigations"]],
"wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
+ "wasm_no_native": [["--no-wasm-jit-to-native"]],
}
-ALL_VARIANTS = set(["default", "future", "liftoff", "stress",
- "stress_incremental_marking", "nooptimization",
- "stress_background_compile", "wasm_traps"])
+ALL_VARIANTS = set(ALL_VARIANT_FLAGS.keys())
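With ALL_VARIANTS now derived from the keys of ALL_VARIANT_FLAGS, adding a
variant is a single dictionary entry and the set can no longer drift out of
sync with the flag table. A quick check against a trimmed copy of the table:

ALL_VARIANT_FLAGS = {
    'default': [[]],
    'slow_path': [['--force-slow-path']],
    'stress_sampling': [['--stress-sampling-allocation-profiler=16384']],
}
ALL_VARIANTS = set(ALL_VARIANT_FLAGS.keys())

assert 'slow_path' in ALL_VARIANTS
print(ALL_VARIANT_FLAGS['slow_path'][0])  # ['--force-slow-path']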
diff --git a/deps/v8/tools/testrunner/local/verbose.py b/deps/v8/tools/testrunner/local/verbose.py
index f28398fa42..49e808588c 100644
--- a/deps/v8/tools/testrunner/local/verbose.py
+++ b/deps/v8/tools/testrunner/local/verbose.py
@@ -38,28 +38,30 @@ REPORT_TEMPLATE = (
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
- * %(fail)4d tests are expected to fail that we should fix""")
+ * %(fail)4d tests are expected to fail that we should fix
+ * %(crash)4d tests are expected to crash
+""")
+# TODO(majeski): Turn it into an observer.
def PrintReport(tests):
total = len(tests)
- skipped = nocrash = passes = fail_ok = fail = 0
+ skipped = nocrash = passes = fail_ok = fail = crash = 0
for t in tests:
- outcomes = t.suite.GetStatusFileOutcomes(t)
- if not outcomes:
- passes += 1
- continue
- if statusfile.DoSkip(outcomes):
+ if t.do_skip:
skipped += 1
- continue
- if statusfile.IsPassOrFail(outcomes):
+ elif t.is_pass_or_fail:
nocrash += 1
- if list(outcomes) == [statusfile.PASS]:
- passes += 1
- if statusfile.IsFailOk(outcomes):
+ elif t.is_fail_ok:
fail_ok += 1
- if list(outcomes) == [statusfile.FAIL]:
+ elif t.expected_outcomes == [statusfile.PASS]:
+ passes += 1
+ elif t.expected_outcomes == [statusfile.FAIL]:
fail += 1
+ elif t.expected_outcomes == [statusfile.CRASH]:
+ crash += 1
+ else:
+      assert False  # Unreachable. TODO: check this in the outcomes parsing phase.
print REPORT_TEMPLATE % {
"total": total,
@@ -67,18 +69,19 @@ def PrintReport(tests):
"nocrash": nocrash,
"pass": passes,
"fail_ok": fail_ok,
- "fail": fail
+ "fail": fail,
+ "crash": crash,
}
def PrintTestSource(tests):
for test in tests:
- suite = test.suite
- source = suite.GetSourceForTest(test).strip()
- if len(source) > 0:
- print "--- begin source: %s/%s ---" % (suite.name, test.path)
- print source
- print "--- end source: %s/%s ---" % (suite.name, test.path)
+ print "--- begin source: %s ---" % test
+ if test.is_source_available():
+ print test.get_source()
+ else:
+ print '(no source available)'
+ print "--- end source: %s ---" % test
def FormatTime(d):
@@ -86,16 +89,16 @@ def FormatTime(d):
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
-def PrintTestDurations(suites, overall_time):
+def PrintTestDurations(suites, outputs, overall_time):
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
- timed_tests = [ t for s in suites for t in s.tests
- if t.duration is not None ]
- timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
+ timed_tests = [(t, outputs[t].duration) for s in suites for t in s.tests
+ if t in outputs]
+ timed_tests.sort(key=lambda (_, duration): duration, reverse=True)
index = 1
- for entry in timed_tests[:20]:
- t = FormatTime(entry.duration)
- sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
+ for test, duration in timed_tests[:20]:
+ t = FormatTime(duration)
+ sys.stderr.write("%4i (%s) %s\n" % (index, t, test))
index += 1
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
index fb5d717728..a3dd56d2dd 100644
--- a/deps/v8/tools/testrunner/objects/context.py
+++ b/deps/v8/tools/testrunner/objects/context.py
@@ -29,8 +29,8 @@
class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
- no_sorting, rerun_failures_count, rerun_failures_max,
- predictable, no_harness, use_perf_data, sancov_dir):
+ no_sorting, rerun_failures_count, rerun_failures_max, no_harness,
+ use_perf_data, sancov_dir, infra_staging=False):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -45,7 +45,7 @@ class Context():
self.no_sorting = no_sorting
self.rerun_failures_count = rerun_failures_count
self.rerun_failures_max = rerun_failures_max
- self.predictable = predictable
self.no_harness = no_harness
self.use_perf_data = use_perf_data
self.sancov_dir = sancov_dir
+ self.infra_staging = infra_staging
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index 99d6137698..adc33c9f12 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -32,12 +32,13 @@ from ..local import utils
class Output(object):
- def __init__(self, exit_code, timed_out, stdout, stderr, pid):
+ def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.pid = pid
+ self.duration = duration
def HasCrashed(self):
if utils.IsWindows():
diff --git a/deps/v8/tools/testrunner/objects/predictable.py b/deps/v8/tools/testrunner/objects/predictable.py
new file mode 100644
index 0000000000..ad93077be9
--- /dev/null
+++ b/deps/v8/tools/testrunner/objects/predictable.py
@@ -0,0 +1,57 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from ..local import statusfile
+from ..outproc import base as outproc_base
+from ..testproc.result import Result
+
+
+# Only check the exit code of the predictable_wrapper in
+# verify-predictable mode. Negative tests are not supported as they
+# usually also don't print allocation hashes. There are two versions of
+# negative tests: one specified by the test, the other specified through
+# the status file (e.g. known bugs).
+
+
+def get_outproc(test):
+ output_proc = test.output_proc
+ if output_proc.negative or statusfile.FAIL in test.expected_outcomes:
+ # TODO(majeski): Skip these tests instead of having special outproc.
+ return NeverUnexpectedOutputOutProc(output_proc)
+ return OutProc(output_proc)
+
+
+class OutProc(outproc_base.BaseOutProc):
+  """Output processor wrapper for predictable mode. It has custom process and
+  has_unexpected_output implementations, but delegates all other methods to
+  the wrapped output processor.
+ """
+ def __init__(self, _outproc):
+ super(OutProc, self).__init__()
+ self._outproc = _outproc
+
+ def process(self, output):
+ return Result(self.has_unexpected_output(output), output)
+
+ def has_unexpected_output(self, output):
+ return output.exit_code != 0
+
+ def get_outcome(self, output):
+ return self._outproc.get_outcome(output)
+
+ @property
+ def negative(self):
+ return self._outproc.negative
+
+ @property
+ def expected_outcomes(self):
+ return self._outproc.expected_outcomes
+
+
+class NeverUnexpectedOutputOutProc(OutProc):
+  """Output processor wrapper for tests whose has_unexpected_output always
+  returns False in predictable mode.
+ """
+ def has_unexpected_output(self, output):
+ return False
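get_outproc() wraps a test's regular output processor, substituting
NeverUnexpectedOutputOutProc for negative or expected-FAIL tests. A sketch
of just the selection logic, with stub objects exposing the two attributes
it inspects:

class StubOutProc(object):
  negative = False

class StubTest(object):
  output_proc = StubOutProc()
  expected_outcomes = ['FAIL']

def pick_wrapper(test):
  # Mirrors get_outproc() above: negative or expected-FAIL tests get the
  # wrapper that never reports unexpected output.
  if test.output_proc.negative or 'FAIL' in test.expected_outcomes:
    return 'NeverUnexpectedOutputOutProc'
  return 'OutProc'

print(pick_wrapper(StubTest()))  # NeverUnexpectedOutputOutProc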
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index fd8c27bc59..06db32802c 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -25,45 +25,274 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import copy
+import os
+import re
+import shlex
+
+from ..outproc import base as outproc
+from ..local import command
+from ..local import statusfile
+from ..local import utils
+
+FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
+
+
class TestCase(object):
- def __init__(self, suite, path, variant=None, flags=None):
+ def __init__(self, suite, path, name):
self.suite = suite # TestSuite object
+
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
- self.flags = flags or [] # list of strings, flags specific to this test
- self.variant = variant # name of the used testing variant
- self.output = None
+ self.name = name # string that identifies test in the status file
+
+ self.variant = None # name of the used testing variant
+ self.variant_flags = [] # list of strings, flags specific to this test
+
self.id = None # int, used to map result back to TestCase instance
- self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
+ self.cmd = None
+
+ # Fields used by the test processors.
+ self.origin = None # Test that this test is subtest of.
+ self.processor = None # Processor that created this subtest.
+ self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
+ self.keep_output = False # Can output of this test be dropped
+
+ self._statusfile_outcomes = None
+ self._expected_outcomes = None # optimization: None == [statusfile.PASS]
+ self._statusfile_flags = None
+ self._prepare_outcomes()
+
+ def create_subtest(self, processor, subtest_id, variant=None, flags=None,
+ keep_output=False):
+ subtest = copy.copy(self)
+ subtest.origin = self
+ subtest.processor = processor
+ subtest.procid += '.%s' % subtest_id
+ subtest.keep_output = keep_output
+ if variant is not None:
+ assert self.variant is None
+ subtest.variant = variant
+ subtest.variant_flags = flags
+ subtest._prepare_outcomes()
+ return subtest
+
+ def create_variant(self, variant, flags, procid_suffix=None):
+ """Makes a shallow copy of the object and updates variant, variant flags and
+ all fields that depend on it, e.g. expected outcomes.
+
+    Args:
+      variant - variant name
+      flags - flags that should be added to the origin test's variant flags
+      procid_suffix - for multiple variants with the same name, set a suffix
+        to keep procid unique.
+ """
+ other = copy.copy(self)
+ if not self.variant_flags:
+ other.variant_flags = flags
+ else:
+ other.variant_flags = self.variant_flags + flags
+ other.variant = variant
+ if procid_suffix:
+ other.procid += '[%s-%s]' % (variant, procid_suffix)
+ else:
+ other.procid += '[%s]' % variant
+
+ other._prepare_outcomes(variant != self.variant)
+
+ return other
+
+ def _prepare_outcomes(self, force_update=True):
+ if force_update or self._statusfile_outcomes is None:
+ def is_flag(outcome):
+ return outcome.startswith('--')
+ def not_flag(outcome):
+ return not is_flag(outcome)
+
+ outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
+ self._statusfile_outcomes = filter(not_flag, outcomes)
+ self._statusfile_flags = filter(is_flag, outcomes)
+ self.expected_outcomes = (
+ self._parse_status_file_outcomes(self._statusfile_outcomes))
+
+ def _parse_status_file_outcomes(self, outcomes):
+ if (statusfile.FAIL_SLOPPY in outcomes and
+ '--use-strict' not in self.variant_flags):
+ return outproc.OUTCOMES_FAIL
+
+ expected_outcomes = []
+ if (statusfile.FAIL in outcomes or
+ statusfile.FAIL_OK in outcomes):
+ expected_outcomes.append(statusfile.FAIL)
+ if statusfile.CRASH in outcomes:
+ expected_outcomes.append(statusfile.CRASH)
+
+ # Do not add PASS if there is nothing else. Empty outcomes are converted to
+ # the global [PASS].
+ if expected_outcomes and statusfile.PASS in outcomes:
+ expected_outcomes.append(statusfile.PASS)
+
+ # Avoid creating multiple instances of a list with a single FAIL.
+ if expected_outcomes == outproc.OUTCOMES_FAIL:
+ return outproc.OUTCOMES_FAIL
+ return expected_outcomes or outproc.OUTCOMES_PASS
+
+ @property
+ def do_skip(self):
+ return statusfile.SKIP in self._statusfile_outcomes
+
+ @property
+ def is_slow(self):
+ return statusfile.SLOW in self._statusfile_outcomes
+
+ @property
+ def is_fail_ok(self):
+ return statusfile.FAIL_OK in self._statusfile_outcomes
- def CopyAddingFlags(self, variant, flags):
- return TestCase(self.suite, self.path, variant, self.flags + flags)
+ @property
+ def is_pass_or_fail(self):
+ return (statusfile.PASS in self._statusfile_outcomes and
+ statusfile.FAIL in self._statusfile_outcomes and
+ statusfile.CRASH not in self._statusfile_outcomes)
- def SetSuiteObject(self, suites):
- self.suite = suites[self.suite]
+ @property
+ def only_standard_variant(self):
+ return statusfile.NO_VARIANTS in self._statusfile_outcomes
- def suitename(self):
- return self.suite.name
+ def get_command(self, context):
+ params = self._get_cmd_params(context)
+ env = self._get_cmd_env()
+ shell, shell_flags = self._get_shell_with_flags(context)
+ timeout = self._get_timeout(params, context.timeout)
+ return self._create_cmd(shell, shell_flags + params, env, timeout, context)
- def GetLabel(self):
- return self.suitename() + "/" + self.suite.CommonTestName(self)
+ def _get_cmd_params(self, ctx):
+ """Gets command parameters and combines them in the following order:
+ - files [empty by default]
+ - extra flags (from command line)
+ - user flags (variant/fuzzer flags)
+ - statusfile flags
+ - mode flags (based on chosen mode)
+ - source flags (from source code) [empty by default]
- def __getstate__(self):
- """Representation to pickle test cases.
+    The best way to modify how parameters are created is to override only the
+    methods that return the partial parameter lists.
+ """
+ return (
+ self._get_files_params(ctx) +
+ self._get_extra_flags(ctx) +
+ self._get_variant_flags() +
+ self._get_statusfile_flags() +
+ self._get_mode_flags(ctx) +
+ self._get_source_flags() +
+ self._get_suite_flags(ctx)
+ )
+
+ def _get_cmd_env(self):
+ return {}
+
+ def _get_files_params(self, ctx):
+ return []
+
+ def _get_extra_flags(self, ctx):
+ return ctx.extra_flags
+
+ def _get_variant_flags(self):
+ return self.variant_flags
- The original suite won't be sent beyond process boundaries. Instead
- send the name only and retrieve a process-local suite later.
+ def _get_statusfile_flags(self):
+ """Gets runtime flags from a status file.
+
+ Every outcome that starts with "--" is a flag.
"""
- return dict(self.__dict__, suite=self.suite.name)
+ return self._statusfile_flags
+
+ def _get_mode_flags(self, ctx):
+ return ctx.mode_flags
+
+ def _get_source_flags(self):
+ return []
+
+ def _get_suite_flags(self, ctx):
+ return []
+
+ def _get_shell_with_flags(self, ctx):
+ shell = self.get_shell()
+ shell_flags = []
+ if shell == 'd8':
+ shell_flags.append('--test')
+ if utils.IsWindows():
+ shell += '.exe'
+ if ctx.random_seed:
+ shell_flags.append('--random-seed=%s' % ctx.random_seed)
+ return shell, shell_flags
+
+ def _get_timeout(self, params, timeout):
+ if "--stress-opt" in params:
+ timeout *= 4
+ if "--noenable-vfp3" in params:
+ timeout *= 2
+
+ # TODO(majeski): make it slow outcome dependent.
+ timeout *= 2
+ return timeout
+
+ def get_shell(self):
+ return 'd8'
+
+ def _get_suffix(self):
+ return '.js'
+
+ def _create_cmd(self, shell, params, env, timeout, ctx):
+ return command.Command(
+ cmd_prefix=ctx.command_prefix,
+ shell=os.path.abspath(os.path.join(ctx.shell_dir, shell)),
+ args=params,
+ env=env,
+ timeout=timeout,
+ verbose=ctx.verbose
+ )
+
+ def _parse_source_flags(self, source=None):
+ source = source or self.get_source()
+ flags = []
+ for match in re.findall(FLAGS_PATTERN, source):
+ flags += shlex.split(match.strip())
+ return flags
+
+ def is_source_available(self):
+ return self._get_source_path() is not None
+
+ def get_source(self):
+ with open(self._get_source_path()) as f:
+ return f.read()
+
+ def _get_source_path(self):
+ return None
+
+ @property
+ def output_proc(self):
+ if self.expected_outcomes is outproc.OUTCOMES_PASS:
+ return outproc.DEFAULT
+ return outproc.OutProc(self.expected_outcomes)
def __cmp__(self, other):
# Make sure that test cases are sorted correctly if sorted without
# key function. But using a key function is preferred for speed.
return cmp(
- (self.suite.name, self.path, self.flags),
- (other.suite.name, other.path, other.flags),
+ (self.suite.name, self.name, self.variant_flags),
+ (other.suite.name, other.name, other.variant_flags)
)
+ def __hash__(self):
+ return hash((self.suite.name, self.name, ''.join(self.variant_flags)))
+
def __str__(self):
- return "[%s/%s %s]" % (self.suite.name, self.path, self.flags)
+ return self.suite.name + '/' + self.name
+
+ # TODO(majeski): Rename `id` field or `get_id` function since they're
+ # unrelated.
+ def get_id(self):
+ return '%s/%s %s' % (
+ self.suite.name, self.name, ' '.join(self.variant_flags))
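FLAGS_PATTERN and _parse_source_flags() above recover per-test flags from
"// Flags:" comments in the test source. The same logic, standalone, on a
hypothetical source snippet:

import re
import shlex

FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")

source = """
// Flags: --allow-natives-syntax --expose-gc
// Flags: --stress-opt
print("hello");
"""

flags = []
for match in re.findall(FLAGS_PATTERN, source):
  flags += shlex.split(match.strip())
print(flags)
# ['--allow-natives-syntax', '--expose-gc', '--stress-opt']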
diff --git a/deps/v8/tools/testrunner/outproc/__init__.py b/deps/v8/tools/testrunner/outproc/__init__.py
new file mode 100644
index 0000000000..4433538556
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
new file mode 100644
index 0000000000..9a9db4e81d
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -0,0 +1,166 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import itertools
+
+from ..local import statusfile
+from ..testproc.result import Result
+
+
+OUTCOMES_PASS = [statusfile.PASS]
+OUTCOMES_FAIL = [statusfile.FAIL]
+
+
+class BaseOutProc(object):
+ def process(self, output):
+ return Result(self.has_unexpected_output(output), output)
+
+ def has_unexpected_output(self, output):
+ return self.get_outcome(output) not in self.expected_outcomes
+
+ def get_outcome(self, output):
+ if output.HasCrashed():
+ return statusfile.CRASH
+ elif output.HasTimedOut():
+ return statusfile.TIMEOUT
+ elif self._has_failed(output):
+ return statusfile.FAIL
+ else:
+ return statusfile.PASS
+
+ def _has_failed(self, output):
+ execution_failed = self._is_failure_output(output)
+ if self.negative:
+ return not execution_failed
+ return execution_failed
+
+ def _is_failure_output(self, output):
+ return output.exit_code != 0
+
+ @property
+ def negative(self):
+ return False
+
+ @property
+ def expected_outcomes(self):
+ raise NotImplementedError()
+
+
+class Negative(object):
+ @property
+ def negative(self):
+ return True
+
+
+class PassOutProc(BaseOutProc):
+ """Output processor optimized for positive tests expected to PASS."""
+ def has_unexpected_output(self, output):
+ return self.get_outcome(output) != statusfile.PASS
+
+ @property
+ def expected_outcomes(self):
+ return OUTCOMES_PASS
+
+
+class OutProc(BaseOutProc):
+ """Output processor optimized for positive tests with expected outcomes
+ different than a single PASS.
+ """
+ def __init__(self, expected_outcomes):
+ self._expected_outcomes = expected_outcomes
+
+ @property
+ def expected_outcomes(self):
+ return self._expected_outcomes
+
+ # TODO(majeski): Inherit from PassOutProc in case of OUTCOMES_PASS and remove
+ # custom get/set state.
+ def __getstate__(self):
+ d = self.__dict__
+ if self._expected_outcomes is OUTCOMES_PASS:
+ d = d.copy()
+ del d['_expected_outcomes']
+ return d
+
+ def __setstate__(self, d):
+ if '_expected_outcomes' not in d:
+ d['_expected_outcomes'] = OUTCOMES_PASS
+ self.__dict__.update(d)
+
+
+# TODO(majeski): Override __reduce__ to make it deserialize as one instance.
+DEFAULT = PassOutProc()
+
+
+class ExpectedOutProc(OutProc):
+  """Output processor whose _is_failure_output compares the actual output
+  line by line with an expected-output file.
+ """
+ def __init__(self, expected_outcomes, expected_filename):
+ super(ExpectedOutProc, self).__init__(expected_outcomes)
+ self._expected_filename = expected_filename
+
+ def _is_failure_output(self, output):
+ with open(self._expected_filename, 'r') as f:
+ expected_lines = f.readlines()
+
+ for act_iterator in self._act_block_iterator(output):
+ for expected, actual in itertools.izip_longest(
+ self._expected_iterator(expected_lines),
+ act_iterator,
+ fillvalue=''
+ ):
+ if expected != actual:
+ return True
+ return False
+
+ def _act_block_iterator(self, output):
+ """Iterates over blocks of actual output lines."""
+ lines = output.stdout.splitlines()
+ start_index = 0
+ found_eqeq = False
+ for index, line in enumerate(lines):
+ # If a stress test separator is found:
+ if line.startswith('=='):
+ # Iterate over all lines before a separator except the first.
+ if not found_eqeq:
+ found_eqeq = True
+ else:
+ yield self._actual_iterator(lines[start_index:index])
+ # The next block of output lines starts after the separator.
+ start_index = index + 1
+ # Iterate over complete output if no separator was found.
+ if not found_eqeq:
+ yield self._actual_iterator(lines)
+
+ def _actual_iterator(self, lines):
+ return self._iterator(lines, self._ignore_actual_line)
+
+ def _expected_iterator(self, lines):
+ return self._iterator(lines, self._ignore_expected_line)
+
+ def _ignore_actual_line(self, line):
+ """Ignore empty lines, valgrind output, Android output and trace
+ incremental marking output.
+ """
+ if not line:
+ return True
+ return (line.startswith('==') or
+ line.startswith('**') or
+ line.startswith('ANDROID') or
+ line.startswith('###') or
+ # FIXME(machenbach): The test driver shouldn't try to use slow
+ # asserts if they weren't compiled. This fails in optdebug=2.
+ line == 'Warning: unknown flag --enable-slow-asserts.' or
+ line == 'Try --help for options')
+
+ def _ignore_expected_line(self, line):
+ return not line
+
+ def _iterator(self, lines, ignore_predicate):
+ for line in lines:
+ line = line.strip()
+ if not ignore_predicate(line):
+ yield line
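BaseOutProc.get_outcome() maps an Output to CRASH, TIMEOUT, FAIL or PASS,
with negative tests inverting the failure check. A condensed standalone
sketch, using a stub output object and a simplified crash check:

class StubOutput(object):
  def __init__(self, exit_code, timed_out=False):
    self.exit_code = exit_code
    self.timed_out = timed_out
  def HasCrashed(self):
    return self.exit_code < 0  # simplified; the real check is platform aware
  def HasTimedOut(self):
    return self.timed_out

def get_outcome(output, negative=False):
  if output.HasCrashed():
    return 'CRASH'
  if output.HasTimedOut():
    return 'TIMEOUT'
  failed = (output.exit_code != 0) != negative  # negative tests invert failure
  return 'FAIL' if failed else 'PASS'

print(get_outcome(StubOutput(0)))                 # PASS
print(get_outcome(StubOutput(1)))                 # FAIL
print(get_outcome(StubOutput(1), negative=True))  # PASS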
diff --git a/deps/v8/tools/testrunner/outproc/message.py b/deps/v8/tools/testrunner/outproc/message.py
new file mode 100644
index 0000000000..bbfc1cdf7e
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/message.py
@@ -0,0 +1,56 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import itertools
+import os
+import re
+
+from . import base
+
+
+class OutProc(base.OutProc):
+ def __init__(self, expected_outcomes, basepath, expected_fail):
+ super(OutProc, self).__init__(expected_outcomes)
+ self._basepath = basepath
+ self._expected_fail = expected_fail
+
+ def _is_failure_output(self, output):
+ fail = output.exit_code != 0
+ if fail != self._expected_fail:
+ return True
+
+ expected_lines = []
+ # Can't use utils.ReadLinesFrom() here because it strips whitespace.
+ with open(self._basepath + '.out') as f:
+ for line in f:
+ if line.startswith("#") or not line.strip():
+ continue
+ expected_lines.append(line)
+ raw_lines = output.stdout.splitlines()
+    actual_lines = [s for s in raw_lines if not self._ignore_line(s)]
+ if len(expected_lines) != len(actual_lines):
+ return True
+
+ env = {
+ 'basename': os.path.basename(self._basepath + '.js'),
+ }
+ for (expected, actual) in itertools.izip_longest(
+ expected_lines, actual_lines, fillvalue=''):
+ pattern = re.escape(expected.rstrip() % env)
+ pattern = pattern.replace('\\*', '.*')
+ pattern = pattern.replace('\\{NUMBER\\}', '\d+(?:\.\d*)?')
+ pattern = '^%s$' % pattern
+ if not re.match(pattern, actual):
+ return True
+ return False
+
+ def _ignore_line(self, string):
+ """Ignore empty lines, valgrind output, Android output."""
+ return (
+ not string or
+ not string.strip() or
+ string.startswith("==") or
+ string.startswith("**") or
+ string.startswith("ANDROID")
+ )
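The matcher in _is_failure_output() above turns each expected line into an
anchored regex: '%' placeholders are filled from env, '*' widens to '.*',
and '{NUMBER}' matches a decimal number. The transformation in isolation,
on a made-up expected line:

import re

env = {'basename': 'foo.js'}
expected = 'undefined:{NUMBER}: * in %(basename)s'

pattern = re.escape(expected.rstrip() % env)
pattern = pattern.replace('\\*', '.*')
pattern = pattern.replace('\\{NUMBER\\}', r'\d+(?:\.\d*)?')
pattern = '^%s$' % pattern

print(bool(re.match(pattern, 'undefined:42: TypeError in foo.js')))  # True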
diff --git a/deps/v8/tools/testrunner/outproc/mkgrokdump.py b/deps/v8/tools/testrunner/outproc/mkgrokdump.py
new file mode 100644
index 0000000000..8efde1226f
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/mkgrokdump.py
@@ -0,0 +1,31 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import difflib
+
+from . import base
+
+
+class OutProc(base.OutProc):
+ def __init__(self, expected_outcomes, expected_path):
+ super(OutProc, self).__init__(expected_outcomes)
+ self._expected_path = expected_path
+
+ def _is_failure_output(self, output):
+ with open(self._expected_path) as f:
+ expected = f.read()
+ expected_lines = expected.splitlines()
+ actual_lines = output.stdout.splitlines()
+ diff = difflib.unified_diff(expected_lines, actual_lines, lineterm="",
+ fromfile="expected_path")
+ diffstring = '\n'.join(diff)
+    if diffstring != "":
+ if "generated from a non-shipping build" in output.stdout:
+ return False
+      if "generated from a shipping build" not in output.stdout:
+ output.stdout = "Unexpected output:\n\n" + output.stdout
+ return True
+ output.stdout = diffstring
+ return True
+ return False
diff --git a/deps/v8/tools/testrunner/outproc/mozilla.py b/deps/v8/tools/testrunner/outproc/mozilla.py
new file mode 100644
index 0000000000..1400d0ec54
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/mozilla.py
@@ -0,0 +1,33 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+def _is_failure_output(self, output):
+ return (
+ output.exit_code != 0 or
+ 'FAILED!' in output.stdout
+ )
+
+
+class OutProc(base.OutProc):
+ """Optimized for positive tests."""
+OutProc._is_failure_output = _is_failure_output
+
+
+class PassOutProc(base.PassOutProc):
+ """Optimized for positive tests expected to PASS."""
+PassOutProc._is_failure_output = _is_failure_output
+
+
+class NegOutProc(base.Negative, OutProc):
+ pass
+
+class NegPassOutProc(base.Negative, PassOutProc):
+ pass
+
+
+MOZILLA_PASS_DEFAULT = PassOutProc()
+MOZILLA_PASS_NEGATIVE = NegPassOutProc()
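Both classes above pick up the shared module-level _is_failure_output by
assignment rather than inheritance, presumably because they extend
different bases. A function assigned to a class attribute behaves like any
other method:

def _is_failure_output(self, output):
  # output is a plain dict here purely for the sketch.
  return output['exit_code'] != 0 or 'FAILED!' in output['stdout']

class OutProc(object):
  pass
OutProc._is_failure_output = _is_failure_output

proc = OutProc()
print(proc._is_failure_output({'exit_code': 0, 'stdout': 'ok'}))       # False
print(proc._is_failure_output({'exit_code': 0, 'stdout': 'FAILED!'}))  # True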
diff --git a/deps/v8/tools/testrunner/outproc/test262.py b/deps/v8/tools/testrunner/outproc/test262.py
new file mode 100644
index 0000000000..b5eb5547c3
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/test262.py
@@ -0,0 +1,54 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+from . import base
+
+
+class ExceptionOutProc(base.OutProc):
+ """Output processor for tests with expected exception."""
+ def __init__(self, expected_outcomes, expected_exception=None):
+ super(ExceptionOutProc, self).__init__(expected_outcomes)
+ self._expected_exception = expected_exception
+
+ def _is_failure_output(self, output):
+ if output.exit_code != 0:
+ return True
+ if self._expected_exception != self._parse_exception(output.stdout):
+ return True
+ return 'FAILED!' in output.stdout
+
+ def _parse_exception(self, string):
+ # somefile:somelinenumber: someerror[: sometext]
+ # somefile might include an optional drive letter on windows e.g. "e:".
+ match = re.search(
+ '^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )', string, re.MULTILINE)
+ if match:
+ return match.group(1).strip()
+ else:
+ return None
+
+
+def _is_failure_output(self, output):
+ return (
+ output.exit_code != 0 or
+ 'FAILED!' in output.stdout
+ )
+
+
+class NoExceptionOutProc(base.OutProc):
+ """Output processor optimized for tests without expected exception."""
+NoExceptionOutProc._is_failure_output = _is_failure_output
+
+
+class PassNoExceptionOutProc(base.PassOutProc):
+ """
+ Output processor optimized for tests expected to PASS without expected
+ exception.
+ """
+PassNoExceptionOutProc._is_failure_output = _is_failure_output
+
+
+PASS_NO_EXCEPTION = PassNoExceptionOutProc()
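_parse_exception() above extracts the exception name from d8-style output
of the form "somefile:line: Error[: message]". The same regex, standalone:

import re

def parse_exception(string):
  match = re.search(
      r'^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )', string, re.MULTILINE)
  if match:
    return match.group(1).strip()
  return None

print(parse_exception('test/foo.js:12: TypeError: x is not a function'))
# TypeError
print(parse_exception('all tests passed'))  # None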
diff --git a/deps/v8/tools/testrunner/outproc/webkit.py b/deps/v8/tools/testrunner/outproc/webkit.py
new file mode 100644
index 0000000000..290e67dc5a
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/webkit.py
@@ -0,0 +1,18 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class OutProc(base.ExpectedOutProc):
+ def _is_failure_output(self, output):
+ if output.exit_code != 0:
+ return True
+ return super(OutProc, self)._is_failure_output(output)
+
+ def _ignore_expected_line(self, line):
+ return (
+ line.startswith('#') or
+ super(OutProc, self)._ignore_expected_line(line)
+ )
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index d838df783c..3be2099252 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -25,6 +25,15 @@ from testrunner.local import utils
from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
from testrunner.objects import context
+from testrunner.objects import predictable
+from testrunner.testproc.execution import ExecutionProc
+from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
+from testrunner.testproc.loader import LoadProc
+from testrunner.testproc.progress import (VerboseProgressIndicator,
+ ResultsTracker,
+ TestsCounter)
+from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.variant import VariantProc
TIMEOUT_DEFAULT = 60
@@ -48,7 +57,7 @@ VARIANT_ALIASES = {
# Shortcut for the two above ("more" first - it has the longer running tests).
"exhaustive": MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
- "extra": ["future", "liftoff"],
+ "extra": ["future", "liftoff", "trusted"],
}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
@@ -66,14 +75,20 @@ SLOW_ARCHS = ["arm",
"s390x",
"arm64"]
+PREDICTABLE_WRAPPER = os.path.join(
+ base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
+
class StandardTestRunner(base_runner.BaseTestRunner):
- def __init__(self):
- super(StandardTestRunner, self).__init__()
+ def __init__(self, *args, **kwargs):
+ super(StandardTestRunner, self).__init__(*args, **kwargs)
self.sancov_dir = None
- def _do_execute(self, options, args):
+ def _get_default_suite_names(self):
+ return ['default']
+
+ def _do_execute(self, suites, args, options):
if options.swarming:
# Swarming doesn't print how isolated commands are called. Lets make
# this less cryptic by printing it ourselves.
@@ -89,42 +104,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
except Exception:
pass
- suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
-
- # Use default tests if no test configuration was provided at the cmd line.
- if len(args) == 0:
- args = ["default"]
-
- # Expand arguments with grouped tests. The args should reflect the list
- # of suites as otherwise filters would break.
- def ExpandTestGroups(name):
- if name in base_runner.TEST_MAP:
- return [suite for suite in base_runner.TEST_MAP[name]]
- else:
- return [name]
- args = reduce(lambda x, y: x + y,
- [ExpandTestGroups(arg) for arg in args],
- [])
-
- args_suites = OrderedDict() # Used as set
- for arg in args:
- args_suites[arg.split('/')[0]] = True
- suite_paths = [ s for s in args_suites if s in suite_paths ]
-
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(base_runner.BASE_DIR, "test", root))
- if suite:
- suites.append(suite)
-
- for s in suites:
- s.PrepareSources()
-
- try:
- return self._execute(args, options, suites)
- except KeyboardInterrupt:
- return 2
+ return self._execute(args, options, suites)
def _add_parser_options(self, parser):
parser.add_option("--sancov-dir",
@@ -154,6 +134,8 @@ class StandardTestRunner(base_runner.BaseTestRunner):
parser.add_option("--extra-flags",
help="Additional flags to pass to each test command",
action="append", default=[])
+ parser.add_option("--infra-staging", help="Use new test runner features",
+ default=False, action="store_true")
parser.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
parser.add_option("-j", help="The number of parallel tasks to run",
@@ -200,12 +182,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
parser.add_option("--rerun-failures-max",
help="Maximum number of failing test cases to rerun.",
default=100, type="int")
- parser.add_option("--shard-count",
- help="Split testsuites into this number of shards",
- default=1, type="int")
- parser.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
parser.add_option("--dont-skip-slow-simulator-tests",
help="Don't skip more slow tests when using a"
" simulator.",
@@ -253,13 +229,13 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if options.novfp3:
options.extra_flags.append("--noenable-vfp3")
- if options.no_variants:
+ if options.no_variants: # pragma: no cover
print ("Option --no-variants is deprecated. "
"Pass --variants=default instead.")
assert not options.variants
options.variants = "default"
- if options.exhaustive_variants:
+ if options.exhaustive_variants: # pragma: no cover
# TODO(machenbach): Switch infra to --variants=exhaustive after M65.
print ("Option --exhaustive-variants is deprecated. "
"Pass --variants=exhaustive instead.")
@@ -280,6 +256,9 @@ class StandardTestRunner(base_runner.BaseTestRunner):
options.extra_flags.append("--predictable")
options.extra_flags.append("--verify_predictable")
options.extra_flags.append("--no-inline-new")
+ # Add predictable wrapper to command prefix.
+ options.command_prefix = (
+ [sys.executable, PREDICTABLE_WRAPPER] + options.command_prefix)
# TODO(machenbach): Figure out how to test a bigger subset of variants on
# msan.
@@ -295,6 +274,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
# Use developer defaults if no variant was specified.
options.variants = options.variants or "dev"
+ if options.variants == "infra_staging":
+ options.variants = "exhaustive"
+ options.infra_staging = True
+
# Resolve variant aliases and dedupe.
# TODO(machenbach): Don't mutate global variable. Rather pass mutated
# version as local variable.
@@ -308,7 +291,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
print "All variants must be in %s" % str(ALL_VARIANTS)
raise base_runner.TestRunnerError()
- def CheckTestMode(name, option):
+ def CheckTestMode(name, option): # pragma: no cover
if not option in ["run", "skip", "dontcare"]:
print "Unknown %s mode %s" % (name, option)
raise base_runner.TestRunnerError()
@@ -317,6 +300,8 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if self.build_config.no_i18n:
base_runner.TEST_MAP["bot_default"].remove("intl")
base_runner.TEST_MAP["default"].remove("intl")
+ # TODO(machenbach): uncomment after infra side lands.
+ # base_runner.TEST_MAP["d8_default"].remove("intl")
def _setup_env(self):
super(StandardTestRunner, self)._setup_env()
@@ -366,10 +351,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
- self.build_config.predictable,
options.no_harness,
use_perf_data=not options.swarming,
- sancov_dir=self.sancov_dir)
+ sancov_dir=self.sancov_dir,
+ infra_staging=options.infra_staging)
# TODO(all): Combine "simulator" and "simulator_run".
# TODO(machenbach): In GN we can derive simulator run from
@@ -405,6 +390,31 @@ class StandardTestRunner(base_runner.BaseTestRunner):
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
}
+
+ progress_indicator = progress.IndicatorNotifier()
+ progress_indicator.Register(
+ progress.PROGRESS_INDICATORS[options.progress]())
+ if options.junitout: # pragma: no cover
+ progress_indicator.Register(progress.JUnitTestProgressIndicator(
+ options.junitout, options.junittestsuite))
+ if options.json_test_results:
+ progress_indicator.Register(progress.JsonTestProgressIndicator(
+ options.json_test_results,
+ self.build_config.arch,
+ self.mode_options.execution_mode,
+ ctx.random_seed))
+ if options.flakiness_results: # pragma: no cover
+ progress_indicator.Register(progress.FlakinessTestProgressIndicator(
+ options.flakiness_results))
+
+ if options.infra_staging:
+ for s in suites:
+ s.ReadStatusFile(variables)
+ s.ReadTestCases(ctx)
+
+ return self._run_test_procs(suites, args, options, progress_indicator,
+ ctx)
+
all_tests = []
num_tests = 0
for s in suites:
@@ -417,14 +427,15 @@ class StandardTestRunner(base_runner.BaseTestRunner):
# First filtering by status applying the generic rules (tests without
# variants)
if options.warn_unused:
- s.WarnUnusedRules(check_variant_rules=False)
+ tests = [(t.name, t.variant) for t in s.tests]
+ s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
- variant_gen = s.CreateVariantGenerator(VARIANTS)
- variant_tests = [ t.CopyAddingFlags(v, flags)
+ variant_gen = s.CreateLegacyVariantsGenerator(VARIANTS)
+ variant_tests = [ t.create_variant(v, flags)
for t in s.tests
for v in variant_gen.FilterVariantsByTest(t)
for flags in variant_gen.GetFlagSets(t, v) ]
@@ -440,22 +451,24 @@ class StandardTestRunner(base_runner.BaseTestRunner):
else:
yield ["--random-seed=%d" % self._random_seed()]
s.tests = [
- t.CopyAddingFlags(t.variant, flags)
+ t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
for t in variant_tests
- for flags in iter_seed_flags()
+ for n, flags in enumerate(iter_seed_flags())
]
else:
s.tests = variant_tests
# Second filtering by status applying also the variant-dependent rules.
if options.warn_unused:
- s.WarnUnusedRules(check_variant_rules=True)
+ tests = [(t.name, t.variant) for t in s.tests]
+ s.statusfile.warn_unused_rules(tests, check_variant_rules=True)
+
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
+ s.tests = self._shard_tests(s.tests, options)
for t in s.tests:
- t.flags += s.GetStatusfileFlags(t)
+ t.cmd = t.get_command(ctx)
- s.tests = self._shard_tests(s.tests, options)
num_tests += len(s.tests)
if options.cat:
@@ -466,28 +479,19 @@ class StandardTestRunner(base_runner.BaseTestRunner):
# Run the tests.
start_time = time.time()
- progress_indicator = progress.IndicatorNotifier()
- progress_indicator.Register(
- progress.PROGRESS_INDICATORS[options.progress]())
- if options.junitout:
- progress_indicator.Register(progress.JUnitTestProgressIndicator(
- options.junitout, options.junittestsuite))
- if options.json_test_results:
- progress_indicator.Register(progress.JsonTestProgressIndicator(
- options.json_test_results,
- self.build_config.arch,
- self.mode_options.execution_mode,
- ctx.random_seed))
- if options.flakiness_results:
- progress_indicator.Register(progress.FlakinessTestProgressIndicator(
- options.flakiness_results))
- runner = execution.Runner(suites, progress_indicator, ctx)
+ if self.build_config.predictable:
+ outproc_factory = predictable.get_outproc
+ else:
+ outproc_factory = None
+
+ runner = execution.Runner(suites, progress_indicator, ctx,
+ outproc_factory)
exit_code = runner.Run(options.j)
overall_duration = time.time() - start_time
if options.time:
- verbose.PrintTestDurations(suites, overall_duration)
+ verbose.PrintTestDurations(suites, runner.outputs, overall_duration)
if num_tests == 0:
print("Warning: no tests were run!")
@@ -503,8 +507,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
print "Merging sancov files."
subprocess.check_call([
sys.executable,
- join(
- base_runner.BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
+ join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
"--coverage-dir=%s" % self.sancov_dir])
except:
print >> sys.stderr, "Error: Merging sancov files failed."
@@ -513,33 +516,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
return exit_code
def _shard_tests(self, tests, options):
- # Read gtest shard configuration from environment (e.g. set by swarming).
- # If none is present, use values passed on the command line.
- shard_count = int(
- os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
- shard_run = os.environ.get('GTEST_SHARD_INDEX')
- if shard_run is not None:
- # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
- shard_run = int(shard_run) + 1
- else:
- shard_run = options.shard_run
-
- if options.shard_count > 1:
- # Log if a value was passed on the cmd line and it differs from the
- # environment variables.
- if options.shard_count != shard_count:
- print("shard_count from cmd line differs from environment variable "
- "GTEST_TOTAL_SHARDS")
- if options.shard_run > 1 and options.shard_run != shard_run:
- print("shard_run from cmd line differs from environment variable "
- "GTEST_SHARD_INDEX")
+ shard_run, shard_count = self._get_shard_info(options)
if shard_count < 2:
return tests
- if shard_run < 1 or shard_run > shard_count:
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
- return tests
count = 0
shard = []
for test in tests:
@@ -548,6 +528,72 @@ class StandardTestRunner(base_runner.BaseTestRunner):
count += 1
return shard
+ def _run_test_procs(self, suites, args, options, progress_indicator,
+ context):
+ jobs = options.j
+
+ print '>>> Running with test processors'
+ loader = LoadProc()
+ tests_counter = TestsCounter()
+ results = ResultsTracker()
+ indicators = progress_indicator.ToProgressIndicatorProcs()
+ execproc = ExecutionProc(jobs, context)
+
+ procs = [
+ loader,
+ NameFilterProc(args) if args else None,
+ StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+ self._create_shard_proc(options),
+ tests_counter,
+ VariantProc(VARIANTS),
+ StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+ ] + indicators + [
+ results,
+ self._create_rerun_proc(context),
+ execproc,
+ ]
+
+ procs = filter(None, procs)
+
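+    # Connect the processors into one linear chain; tests flow forward via
+    # next_test() and results flow back via result_for().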
+ for i in xrange(0, len(procs) - 1):
+ procs[i].connect_to(procs[i + 1])
+
+ tests = [t for s in suites for t in s.tests]
+ tests.sort(key=lambda t: t.is_slow, reverse=True)
+
+ loader.setup()
+ loader.load_tests(tests)
+
+ print '>>> Running %d base tests' % tests_counter.total
+ tests_counter.remove_from_chain()
+
+ execproc.start()
+
+ for indicator in indicators:
+ indicator.finished()
+
+ print '>>> %d tests ran' % results.total
+
+ exit_code = 0
+ if results.failed:
+ exit_code = 1
+ if results.remaining:
+ exit_code = 2
+
+ if exit_code == 1 and options.json_test_results:
+ print("Force exit code 0 after failures. Json test results file "
+ "generated with failure information.")
+ exit_code = 0
+ return exit_code
+
+ def _create_rerun_proc(self, ctx):
+ if not ctx.rerun_failures_count:
+ return None
+ return RerunProc(ctx.rerun_failures_count,
+ ctx.rerun_failures_max)
+
+
if __name__ == '__main__':
sys.exit(StandardTestRunner().execute())
diff --git a/deps/v8/tools/testrunner/testproc/__init__.py b/deps/v8/tools/testrunner/testproc/__init__.py
new file mode 100644
index 0000000000..4433538556
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/testproc/base.py b/deps/v8/tools/testrunner/testproc/base.py
new file mode 100644
index 0000000000..1a87dbed55
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/base.py
@@ -0,0 +1,207 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from .result import SKIPPED
+
+
+"""
+Pipeline
+
+Test processors are chained together and communicate with each other by
+calling previous/next processor in the chain.
+ ----next_test()----> ----next_test()---->
+Proc1 Proc2 Proc3
+ <---result_for()---- <---result_for()----
+
+For every next_test there is exactly one result_for call.
+If processor ignores the test it has to return SkippedResult.
+If it created multiple subtests for one test and wants to pass all of them to
+the previous processor it can enclose them in GroupedResult.
+
+
+Subtests
+
+When test processor needs to modify the test or create some variants of the
+test it creates subtests and sends them to the next processor.
+Each subtest has:
+- procid - globally unique id that should contain id of the parent test and
+ some suffix given by test processor, e.g. its name + subtest type.
+- processor - which created it
+- origin - pointer to the parent (sub)test
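+
+
+Example
+
+A minimal wiring sketch (illustrative only; `ctx` and `tests` are assumed to
+come from the execution context and the loaded suites):
+
+  loader = LoadProc()
+  results = ResultsTracker()
+  execproc = ExecutionProc(jobs=4, context=ctx)
+  loader.connect_to(results)
+  results.connect_to(execproc)
+  loader.setup()
+  loader.load_tests(tests)
+  execproc.start()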
+"""
+
+
+DROP_RESULT = 0
+DROP_OUTPUT = 1
+DROP_PASS_OUTPUT = 2
+DROP_PASS_STDOUT = 3
+
+def get_reduce_result_function(requirement):
+ if requirement == DROP_RESULT:
+ return lambda _: None
+
+ if requirement == DROP_OUTPUT:
+ def f(result):
+ result.output = None
+ return result
+ return f
+
+ if requirement == DROP_PASS_OUTPUT:
+ def f(result):
+ if not result.has_unexpected_output:
+ result.output = None
+ return result
+ return f
+
+ if requirement == DROP_PASS_STDOUT:
+ def f(result):
+ if not result.has_unexpected_output:
+ result.output.stdout = None
+ result.output.stderr = None
+ return result
+ return f
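+
+# Illustrative sketch (assumes `result` is a Result object produced by an
+# output processor, as defined in result.py):
+#
+#   reduce_fn = get_reduce_result_function(DROP_PASS_OUTPUT)
+#   result = reduce_fn(result)  # output dropped if the run was as expected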
+
+
+class TestProc(object):
+ def __init__(self):
+ self._prev_proc = None
+ self._next_proc = None
+ self._requirement = DROP_RESULT
+ self._prev_requirement = None
+ self._reduce_result = lambda result: result
+
+ def connect_to(self, next_proc):
+ """Puts `next_proc` after itself in the chain."""
+ next_proc._prev_proc = self
+ self._next_proc = next_proc
+
+ def remove_from_chain(self):
+ if self._prev_proc:
+ self._prev_proc._next_proc = self._next_proc
+ if self._next_proc:
+ self._next_proc._prev_proc = self._prev_proc
+
+ def setup(self, requirement=DROP_RESULT):
+ """
+    Method called by the previous processor or the pipeline creator to let
+ the processors know what part of the result can be ignored.
+ """
+ self._prev_requirement = requirement
+ if self._next_proc:
+ self._next_proc.setup(max(requirement, self._requirement))
+ if self._prev_requirement < self._requirement:
+ self._reduce_result = get_reduce_result_function(self._prev_requirement)
+
+ def next_test(self, test):
+ """
+    Method called by the previous processor whenever it produces a new test.
+    This method shouldn't be called by anyone except the previous processor.
+ """
+ raise NotImplementedError()
+
+ def result_for(self, test, result):
+ """
+    Method called by the next processor whenever it has a result for some test.
+    This method shouldn't be called by anyone except the next processor.
+ """
+ raise NotImplementedError()
+
+ def heartbeat(self):
+ if self._prev_proc:
+ self._prev_proc.heartbeat()
+
+ ### Communication
+
+ def _send_test(self, test):
+ """Helper method for sending test to the next processor."""
+ self._next_proc.next_test(test)
+
+ def _send_result(self, test, result):
+ """Helper method for sending result to the previous processor."""
+ result = self._reduce_result(result)
+ self._prev_proc.result_for(test, result)
+
+
+
+class TestProcObserver(TestProc):
+ """Processor used for observing the data."""
+ def __init__(self):
+ super(TestProcObserver, self).__init__()
+
+ def next_test(self, test):
+ self._on_next_test(test)
+ self._send_test(test)
+
+ def result_for(self, test, result):
+ self._on_result_for(test, result)
+ self._send_result(test, result)
+
+ def heartbeat(self):
+ self._on_heartbeat()
+ super(TestProcObserver, self).heartbeat()
+
+ def _on_next_test(self, test):
+ """Method called after receiving test from previous processor but before
+ sending it to the next one."""
+ pass
+
+ def _on_result_for(self, test, result):
+ """Method called after receiving result from next processor but before
+ sending it to the previous one."""
+ pass
+
+ def _on_heartbeat(self):
+ pass
+
+
+class TestProcProducer(TestProc):
+ """Processor for creating subtests."""
+
+ def __init__(self, name):
+ super(TestProcProducer, self).__init__()
+ self._name = name
+
+ def next_test(self, test):
+ self._next_test(test)
+
+ def result_for(self, subtest, result):
+ self._result_for(subtest.origin, subtest, result)
+
+ ### Implementation
+ def _next_test(self, test):
+ raise NotImplementedError()
+
+ def _result_for(self, test, subtest, result):
+ """
+ result_for method extended with `subtest` parameter.
+
+    Args:
+ test: test used by current processor to create the subtest.
+ subtest: test for which the `result` is.
+ result: subtest execution result created by the output processor.
+ """
+ raise NotImplementedError()
+
+ ### Managing subtests
+ def _create_subtest(self, test, subtest_id, **kwargs):
+ """Creates subtest with subtest id <processor name>-`subtest_id`."""
+ return test.create_subtest(self, '%s-%s' % (self._name, subtest_id),
+ **kwargs)
+
+
+class TestProcFilter(TestProc):
+ """Processor for filtering tests."""
+
+ def next_test(self, test):
+ if self._filter(test):
+ self._send_result(test, SKIPPED)
+ else:
+ self._send_test(test)
+
+ def result_for(self, test, result):
+ self._send_result(test, result)
+
+ def _filter(self, test):
+ """Returns whether test should be filtered out."""
+ raise NotImplementedError()
diff --git a/deps/v8/tools/testrunner/testproc/execution.py b/deps/v8/tools/testrunner/testproc/execution.py
new file mode 100644
index 0000000000..021b02af3e
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/execution.py
@@ -0,0 +1,92 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import traceback
+
+from . import base
+from ..local import pool
+
+
+# Global function for multiprocessing, because pickling a static method doesn't
+# work on Windows.
+def run_job(job, process_context):
+ return job.run(process_context)
+
+
+def create_process_context(requirement):
+ return ProcessContext(base.get_reduce_result_function(requirement))
+
+
+JobResult = collections.namedtuple('JobResult', ['id', 'result'])
+ProcessContext = collections.namedtuple('ProcessContext', ['reduce_result_f'])
+
+
+class Job(object):
+ def __init__(self, test_id, cmd, outproc, keep_output):
+ self.test_id = test_id
+ self.cmd = cmd
+ self.outproc = outproc
+ self.keep_output = keep_output
+
+ def run(self, process_ctx):
+ output = self.cmd.execute()
+ result = self.outproc.process(output)
+ if not self.keep_output:
+ result = process_ctx.reduce_result_f(result)
+ return JobResult(self.test_id, result)
+
+
+class ExecutionProc(base.TestProc):
+ """Last processor in the chain. Instead of passing tests further it creates
+ commands and output processors, executes them in multiple worker processes and
+ sends results to the previous processor.
+ """
+
+ def __init__(self, jobs, context):
+ super(ExecutionProc, self).__init__()
+ self._pool = pool.Pool(jobs)
+ self._context = context
+ self._tests = {}
+
+ def connect_to(self, next_proc):
+ assert False, 'ExecutionProc cannot be connected to anything'
+
+ def start(self):
+ try:
+ it = self._pool.imap_unordered(
+ fn=run_job,
+ gen=[],
+ process_context_fn=create_process_context,
+ process_context_args=[self._prev_requirement],
+ )
+ for pool_result in it:
+ if pool_result.heartbeat:
+ continue
+
+ job_result = pool_result.value
+ test_id, result = job_result
+
+ test, result.cmd = self._tests[test_id]
+ del self._tests[test_id]
+ self._send_result(test, result)
+ except KeyboardInterrupt:
+ raise
+ except:
+ traceback.print_exc()
+ raise
+ finally:
+ self._pool.terminate()
+
+ def next_test(self, test):
+ test_id = test.procid
+ cmd = test.get_command(self._context)
+ self._tests[test_id] = test, cmd
+
+ # TODO(majeski): Needs factory for outproc as in local/execution.py
+ outproc = test.output_proc
+ self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
+
+ def result_for(self, test, result):
+ assert False, 'ExecutionProc cannot receive results'
diff --git a/deps/v8/tools/testrunner/testproc/filter.py b/deps/v8/tools/testrunner/testproc/filter.py
new file mode 100644
index 0000000000..5081997751
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/filter.py
@@ -0,0 +1,83 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import defaultdict
+import fnmatch
+
+from . import base
+
+
+class StatusFileFilterProc(base.TestProcFilter):
+ """Filters tests by outcomes from status file.
+
+  The status file has to be loaded before using this processor.
+
+ Args:
+ slow_tests_mode: What to do with slow tests.
+ pass_fail_tests_mode: What to do with pass or fail tests.
+
+ Mode options:
+ None (default): don't skip
+ "skip": skip if slow/pass_fail
+ "run": skip if not slow/pass_fail
+ """
+
+ def __init__(self, slow_tests_mode, pass_fail_tests_mode):
+ super(StatusFileFilterProc, self).__init__()
+ self._slow_tests_mode = slow_tests_mode
+ self._pass_fail_tests_mode = pass_fail_tests_mode
+
+ def _filter(self, test):
+ return (
+ test.do_skip or
+ self._skip_slow(test.is_slow) or
+ self._skip_pass_fail(test.is_pass_or_fail)
+ )
+
+ def _skip_slow(self, is_slow):
+ return (
+ (self._slow_tests_mode == 'run' and not is_slow) or
+ (self._slow_tests_mode == 'skip' and is_slow)
+ )
+
+ def _skip_pass_fail(self, is_pass_fail):
+ return (
+ (self._pass_fail_tests_mode == 'run' and not is_pass_fail) or
+ (self._pass_fail_tests_mode == 'skip' and is_pass_fail)
+ )
+
+
+class NameFilterProc(base.TestProcFilter):
+ """Filters tests based on command-line arguments.
+
+  Each arg can be a glob: asterisks in any position of the name
+  represent zero or more characters. Without asterisks, only exact matches
+  are used, with the exception of a bare test-suite name as argument.
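+
+  Example (sketch): with args ['cctest', 'mjsunit/regress/*'] every cctest
+  test is kept, mjsunit keeps only tests whose path matches regress/*, and
+  tests from all other suites are filtered out.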
+ """
+ def __init__(self, args):
+ super(NameFilterProc, self).__init__()
+
+ self._globs = defaultdict(list)
+ for a in args:
+ argpath = a.split('/')
+ suitename = argpath[0]
+ path = '/'.join(argpath[1:]) or '*'
+ self._globs[suitename].append(path)
+
+ for s, globs in self._globs.iteritems():
+ if not globs or '*' in globs:
+ self._globs[s] = []
+
+ def _filter(self, test):
+ globs = self._globs.get(test.suite.name)
+ if globs is None:
+ return True
+
+ if not globs:
+ return False
+
+ for g in globs:
+ if fnmatch.fnmatch(test.path, g):
+ return False
+ return True
diff --git a/deps/v8/tools/testrunner/testproc/loader.py b/deps/v8/tools/testrunner/testproc/loader.py
new file mode 100644
index 0000000000..0a3d0df1b3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/loader.py
@@ -0,0 +1,27 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class LoadProc(base.TestProc):
+ """First processor in the chain that passes all tests to the next processor.
+ """
+
+ def load_tests(self, tests):
+ loaded = set()
+ for test in tests:
+ if test.procid in loaded:
+ print 'Warning: %s already obtained' % test.procid
+ continue
+
+ loaded.add(test.procid)
+ self._send_test(test)
+
+ def next_test(self, test):
+ assert False, 'Nothing can be connected to the LoadProc'
+
+ def result_for(self, test, result):
+ # Ignore all results.
+ pass
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
new file mode 100644
index 0000000000..78514f7252
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -0,0 +1,385 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import sys
+import time
+
+from . import base
+from ..local import junit_output
+
+
+def print_failure_header(test):
+ if test.output_proc.negative:
+ negative_marker = '[negative] '
+ else:
+ negative_marker = ''
+ print "=== %(label)s %(negative)s===" % {
+ 'label': test,
+ 'negative': negative_marker,
+ }
+
+
+class TestsCounter(base.TestProcObserver):
+ def __init__(self):
+ super(TestsCounter, self).__init__()
+ self.total = 0
+
+ def _on_next_test(self, test):
+ self.total += 1
+
+
+class ResultsTracker(base.TestProcObserver):
+ def __init__(self):
+ super(ResultsTracker, self).__init__()
+ self._requirement = base.DROP_OUTPUT
+
+ self.failed = 0
+ self.remaining = 0
+ self.total = 0
+
+ def _on_next_test(self, test):
+ self.total += 1
+ self.remaining += 1
+
+ def _on_result_for(self, test, result):
+ self.remaining -= 1
+ if result.has_unexpected_output:
+ self.failed += 1
+
+
+class ProgressIndicator(base.TestProcObserver):
+ def finished(self):
+ pass
+
+
+class SimpleProgressIndicator(ProgressIndicator):
+ def __init__(self):
+ super(SimpleProgressIndicator, self).__init__()
+ self._requirement = base.DROP_PASS_OUTPUT
+
+ self._failed = []
+ self._total = 0
+
+ def _on_next_test(self, test):
+ self._total += 1
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ if result.has_unexpected_output:
+ self._failed.append((test, result))
+
+ def finished(self):
+ crashed = 0
+ print
+ for test, result in self._failed:
+ print_failure_header(test)
+ if result.output.stderr:
+ print "--- stderr ---"
+ print result.output.stderr.strip()
+ if result.output.stdout:
+ print "--- stdout ---"
+ print result.output.stdout.strip()
+ print "Command: %s" % result.cmd.to_string()
+ if result.output.HasCrashed():
+ print "exit code: %d" % result.output.exit_code
+ print "--- CRASHED ---"
+ crashed += 1
+ if result.output.HasTimedOut():
+ print "--- TIMEOUT ---"
+ if len(self._failed) == 0:
+ print "==="
+ print "=== All tests succeeded"
+ print "==="
+ else:
+ print
+ print "==="
+ print "=== %i tests failed" % len(self._failed)
+ if crashed > 0:
+ print "=== %i tests CRASHED" % crashed
+ print "==="
+
+
+class VerboseProgressIndicator(SimpleProgressIndicator):
+ def _on_result_for(self, test, result):
+ super(VerboseProgressIndicator, self)._on_result_for(test, result)
+ # TODO(majeski): Support for dummy/grouped results
+ if result.has_unexpected_output:
+ if result.output.HasCrashed():
+ outcome = 'CRASH'
+ else:
+ outcome = 'FAIL'
+ else:
+ outcome = 'pass'
+ print 'Done running %s: %s' % (test, outcome)
+ sys.stdout.flush()
+
+ def _on_heartbeat(self):
+ print 'Still working...'
+ sys.stdout.flush()
+
+
+class DotsProgressIndicator(SimpleProgressIndicator):
+ def __init__(self):
+ super(DotsProgressIndicator, self).__init__()
+ self._count = 0
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ self._count += 1
+ if self._count > 1 and self._count % 50 == 1:
+ sys.stdout.write('\n')
+ if result.has_unexpected_output:
+ if result.output.HasCrashed():
+ sys.stdout.write('C')
+ sys.stdout.flush()
+ elif result.output.HasTimedOut():
+ sys.stdout.write('T')
+ sys.stdout.flush()
+ else:
+ sys.stdout.write('F')
+ sys.stdout.flush()
+ else:
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+
+class CompactProgressIndicator(ProgressIndicator):
+ def __init__(self, templates):
+ super(CompactProgressIndicator, self).__init__()
+ self._requirement = base.DROP_PASS_OUTPUT
+
+ self._templates = templates
+ self._last_status_length = 0
+ self._start_time = time.time()
+
+ self._total = 0
+ self._passed = 0
+ self._failed = 0
+
+ def _on_next_test(self, test):
+ self._total += 1
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ if result.has_unexpected_output:
+ self._failed += 1
+ else:
+ self._passed += 1
+
+ self._print_progress(str(test))
+ if result.has_unexpected_output:
+ output = result.output
+ stdout = output.stdout.strip()
+ stderr = output.stderr.strip()
+
+ self._clear_line(self._last_status_length)
+ print_failure_header(test)
+ if len(stdout):
+ print self._templates['stdout'] % stdout
+ if len(stderr):
+ print self._templates['stderr'] % stderr
+ print "Command: %s" % result.cmd
+ if output.HasCrashed():
+ print "exit code: %d" % output.exit_code
+ print "--- CRASHED ---"
+ if output.HasTimedOut():
+ print "--- TIMEOUT ---"
+
+ def finished(self):
+ self._print_progress('Done')
+ print
+
+ def _print_progress(self, name):
+ self._clear_line(self._last_status_length)
+ elapsed = time.time() - self._start_time
+ if not self._total:
+ progress = 0
+ else:
+ progress = (self._passed + self._failed) * 100 // self._total
+ status = self._templates['status_line'] % {
+ 'passed': self._passed,
+ 'progress': progress,
+ 'failed': self._failed,
+ 'test': name,
+ 'mins': int(elapsed) / 60,
+ 'secs': int(elapsed) % 60
+ }
+ status = self._truncate(status, 78)
+ self._last_status_length = len(status)
+ print status,
+ sys.stdout.flush()
+
+ def _truncate(self, string, length):
+ if length and len(string) > (length - 3):
+ return string[:(length - 3)] + "..."
+ else:
+ return string
+
+ def _clear_line(self, last_length):
+ raise NotImplementedError()
+
+
+class ColorProgressIndicator(CompactProgressIndicator):
+ def __init__(self):
+ templates = {
+ 'status_line': ("[%(mins)02i:%(secs)02i|"
+ "\033[34m%%%(progress) 4d\033[0m|"
+ "\033[32m+%(passed) 4d\033[0m|"
+ "\033[31m-%(failed) 4d\033[0m]: %(test)s"),
+ 'stdout': "\033[1m%s\033[0m",
+ 'stderr': "\033[31m%s\033[0m",
+ }
+ super(ColorProgressIndicator, self).__init__(templates)
+
+ def _clear_line(self, last_length):
+ print "\033[1K\r",
+
+
+class MonochromeProgressIndicator(CompactProgressIndicator):
+ def __init__(self):
+ templates = {
+ 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
+ "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
+ 'stdout': '%s',
+ 'stderr': '%s',
+ }
+ super(MonochromeProgressIndicator, self).__init__(templates)
+
+ def _clear_line(self, last_length):
+ print ("\r" + (" " * last_length) + "\r"),
+
+
+class JUnitTestProgressIndicator(ProgressIndicator):
+ def __init__(self, junitout, junittestsuite):
+ super(JUnitTestProgressIndicator, self).__init__()
+ self._requirement = base.DROP_PASS_STDOUT
+
+ self.outputter = junit_output.JUnitTestOutput(junittestsuite)
+ if junitout:
+ self.outfile = open(junitout, "w")
+ else:
+ self.outfile = sys.stdout
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ fail_text = ""
+ output = result.output
+ if result.has_unexpected_output:
+ stdout = output.stdout.strip()
+ if len(stdout):
+ fail_text += "stdout:\n%s\n" % stdout
+ stderr = output.stderr.strip()
+ if len(stderr):
+ fail_text += "stderr:\n%s\n" % stderr
+ fail_text += "Command: %s" % result.cmd.to_string()
+ if output.HasCrashed():
+ fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
+ if output.HasTimedOut():
+ fail_text += "--- TIMEOUT ---"
+ self.outputter.HasRunTest(
+ test_name=str(test),
+ test_cmd=result.cmd.to_string(relative=True),
+ test_duration=output.duration,
+ test_failure=fail_text)
+
+ def finished(self):
+ self.outputter.FinishAndWrite(self.outfile)
+ if self.outfile != sys.stdout:
+ self.outfile.close()
+
+
+class JsonTestProgressIndicator(ProgressIndicator):
+ def __init__(self, json_test_results, arch, mode, random_seed):
+ super(JsonTestProgressIndicator, self).__init__()
+ # We want to drop stdout/err for all passed tests on the first try, but we
+ # need to get outputs for all runs after the first one. To accommodate that,
+ # reruns are set to keep the result no matter what requirement says, i.e.
+ # keep_output set to True in the RerunProc.
+ self._requirement = base.DROP_PASS_STDOUT
+
+ self.json_test_results = json_test_results
+ self.arch = arch
+ self.mode = mode
+ self.random_seed = random_seed
+ self.results = []
+ self.tests = []
+
+ def _on_result_for(self, test, result):
+ if result.is_rerun:
+ self.process_results(test, result.results)
+ else:
+ self.process_results(test, [result])
+
+ def process_results(self, test, results):
+ for run, result in enumerate(results):
+ # TODO(majeski): Support for dummy/grouped results
+ output = result.output
+ # Buffer all tests for sorting the durations in the end.
+ # TODO(machenbach): Running average + buffer only slowest 20 tests.
+ self.tests.append((test, output.duration, result.cmd))
+
+ # Omit tests that run as expected on the first try.
+ # Everything that happens after the first run is included in the output
+ # even if it flakily passes.
+ if not result.has_unexpected_output and run == 0:
+ continue
+
+ self.results.append({
+ "name": str(test),
+ "flags": result.cmd.args,
+ "command": result.cmd.to_string(relative=True),
+ "run": run + 1,
+ "stdout": output.stdout,
+ "stderr": output.stderr,
+ "exit_code": output.exit_code,
+ "result": test.output_proc.get_outcome(output),
+ "expected": test.expected_outcomes,
+ "duration": output.duration,
+
+ # TODO(machenbach): This stores only the global random seed from the
+ # context and not possible overrides when using random-seed stress.
+ "random_seed": self.random_seed,
+ "target_name": test.get_shell(),
+ "variant": test.variant,
+ })
+
+ def finished(self):
+ complete_results = []
+ if os.path.exists(self.json_test_results):
+ with open(self.json_test_results, "r") as f:
+ # Buildbot might start out with an empty file.
+ complete_results = json.loads(f.read() or "[]")
+
+ duration_mean = None
+ if self.tests:
+ # Get duration mean.
+ duration_mean = (
+ sum(duration for (_, duration, cmd) in self.tests) /
+ float(len(self.tests)))
+
+ # Sort tests by duration.
+ self.tests.sort(key=lambda (_, duration, cmd): duration, reverse=True)
+ slowest_tests = [
+ {
+ "name": str(test),
+ "flags": cmd.args,
+ "command": cmd.to_string(relative=True),
+ "duration": duration,
+ "marked_slow": test.is_slow,
+ } for (test, duration, cmd) in self.tests[:20]
+ ]
+
+ complete_results.append({
+ "arch": self.arch,
+ "mode": self.mode,
+ "results": self.results,
+ "slowest_tests": slowest_tests,
+ "duration_mean": duration_mean,
+ "test_total": len(self.tests),
+ })
+
+ with open(self.json_test_results, "w") as f:
+ f.write(json.dumps(complete_results))
diff --git a/deps/v8/tools/testrunner/testproc/rerun.py b/deps/v8/tools/testrunner/testproc/rerun.py
new file mode 100644
index 0000000000..7f96e0260c
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/rerun.py
@@ -0,0 +1,59 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+from . import base
+from .result import RerunResult
+
+
+class RerunProc(base.TestProcProducer):
+ def __init__(self, rerun_max, rerun_max_total=None):
+ super(RerunProc, self).__init__('Rerun')
+ self._requirement = base.DROP_OUTPUT
+
+ self._rerun = {}
+ self._results = collections.defaultdict(list)
+ self._rerun_max = rerun_max
+ self._rerun_total_left = rerun_max_total
+
+ def _next_test(self, test):
+ self._send_next_subtest(test)
+
+ def _result_for(self, test, subtest, result):
+ # First result
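+    # (subtest ids are created with str(run + 1) in _send_next_subtest, so
+    # the '-1' suffix marks the initial run)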
+ if subtest.procid[-2:] == '-1':
+ # Passed, no reruns
+ if not result.has_unexpected_output:
+ self._send_result(test, result)
+ return
+
+ self._rerun[test.procid] = 0
+
+ results = self._results[test.procid]
+ results.append(result)
+
+ if self._needs_rerun(test, result):
+ self._rerun[test.procid] += 1
+ if self._rerun_total_left is not None:
+ self._rerun_total_left -= 1
+ self._send_next_subtest(test, self._rerun[test.procid])
+ else:
+ result = RerunResult.create(results)
+ self._finalize_test(test)
+ self._send_result(test, result)
+
+ def _needs_rerun(self, test, result):
+ # TODO(majeski): Limit reruns count for slow tests.
+ return ((self._rerun_total_left is None or self._rerun_total_left > 0) and
+ self._rerun[test.procid] < self._rerun_max and
+ result.has_unexpected_output)
+
+ def _send_next_subtest(self, test, run=0):
+ subtest = self._create_subtest(test, str(run + 1), keep_output=(run != 0))
+ self._send_test(subtest)
+
+ def _finalize_test(self, test):
+ del self._rerun[test.procid]
+ del self._results[test.procid]
diff --git a/deps/v8/tools/testrunner/testproc/result.py b/deps/v8/tools/testrunner/testproc/result.py
new file mode 100644
index 0000000000..c817fc06ec
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/result.py
@@ -0,0 +1,97 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ResultBase(object):
+ @property
+ def is_skipped(self):
+ return False
+
+ @property
+ def is_grouped(self):
+ return False
+
+ @property
+ def is_rerun(self):
+ return False
+
+
+class Result(ResultBase):
+ """Result created by the output processor."""
+
+ def __init__(self, has_unexpected_output, output, cmd=None):
+ self.has_unexpected_output = has_unexpected_output
+ self.output = output
+ self.cmd = cmd
+
+
+class GroupedResult(ResultBase):
+ """Result consisting of multiple results. It can be used by processors that
+ create multiple subtests for each test and want to pass all results back.
+ """
+
+ @staticmethod
+ def create(results):
+ """Create grouped result from the list of results. It filters out skipped
+ results. If all results are skipped results it returns skipped result.
+
+ Args:
+ results: list of pairs (test, result)
+ """
+ results = [(t, r) for (t, r) in results if not r.is_skipped]
+ if not results:
+ return SKIPPED
+ return GroupedResult(results)
+
+ def __init__(self, results):
+ self.results = results
+
+ @property
+ def is_grouped(self):
+ return True
+
+
+class SkippedResult(ResultBase):
+ """Result without any meaningful value. Used primarily to inform the test
+ processor that it's test wasn't executed.
+ """
+
+ @property
+ def is_skipped(self):
+ return True
+
+
+SKIPPED = SkippedResult()
+
+
+class RerunResult(Result):
+ """Result generated from several reruns of the same test. It's a subclass of
+ Result since the result of rerun is result of the last run. In addition to
+ normal result it contains results of all reruns.
+ """
+ @staticmethod
+ def create(results):
+ """Create RerunResult based on list of results. List cannot be empty. If it
+ has only one element it's returned as a result.
+ """
+ assert results
+
+ if len(results) == 1:
+ return results[0]
+ return RerunResult(results)
+
+ def __init__(self, results):
+ """Has unexpected output and the output itself of the RerunResult equals to
+ the last result in the passed list.
+ """
+ assert results
+
+ last = results[-1]
+ super(RerunResult, self).__init__(last.has_unexpected_output, last.output,
+ last.cmd)
+ self.results = results
+
+ @property
+ def is_rerun(self):
+ return True
diff --git a/deps/v8/tools/testrunner/testproc/shard.py b/deps/v8/tools/testrunner/testproc/shard.py
new file mode 100644
index 0000000000..1caac9fee6
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/shard.py
@@ -0,0 +1,30 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class ShardProc(base.TestProcFilter):
+ """Processor distributing tests between shards.
+ It simply passes every n-th test. To be deterministic it has to be placed
+ before all processors that generate tests dynamically.
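+
+  Example (sketch): ShardProc(1, 3) passes through tests number 1, 4, 7, ...
+  (0-based) and returns a SkippedResult for every other test.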
+ """
+ def __init__(self, myid, shards_count):
+ """
+ Args:
+ myid: id of the shard within [0; shards_count - 1]
+ shards_count: number of shards
+ """
+ super(ShardProc, self).__init__()
+
+ assert myid >= 0 and myid < shards_count
+
+ self._myid = myid
+ self._shards_count = shards_count
+ self._last = 0
+
+ def _filter(self, test):
+ res = self._last != self._myid
+ self._last = (self._last + 1) % self._shards_count
+ return res
diff --git a/deps/v8/tools/testrunner/testproc/variant.py b/deps/v8/tools/testrunner/testproc/variant.py
new file mode 100644
index 0000000000..dba1af91fc
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/variant.py
@@ -0,0 +1,68 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+from ..local.variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
+from .result import GroupedResult
+
+
+STANDARD_VARIANT = set(["default"])
+
+
+class VariantProc(base.TestProcProducer):
+ """Processor creating variants.
+
+  For each test it keeps a generator that returns variant, flags and id suffix.
+  It produces variants one at a time, so it waits for the result of one
+  variant before creating the next variant of the same test.
+  It maintains the order of the variants passed to __init__.
+
+  There are some cases when a particular variant of a test is not valid. To
+  ignore such subtests, a StatusFileFilterProc should be placed somewhere
+  after the VariantProc.
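+
+  Example (sketch): with variants ['default', 'stress'] each incoming test
+  first yields a 'default' subtest; only after its result arrives is the
+  'stress' subtest created and sent on.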
+ """
+
+ def __init__(self, variants):
+ super(VariantProc, self).__init__('VariantProc')
+ self._next_variant = {}
+ self._variant_gens = {}
+ self._variants = variants
+
+ def setup(self, requirement=base.DROP_RESULT):
+ super(VariantProc, self).setup(requirement)
+
+ # VariantProc is optimized for dropping the result and it should be placed
+ # in the chain where it's possible.
+ assert requirement == base.DROP_RESULT
+
+ def _next_test(self, test):
+ gen = self._variants_gen(test)
+ self._next_variant[test.procid] = gen
+ self._try_send_new_subtest(test, gen)
+
+ def _result_for(self, test, subtest, result):
+ gen = self._next_variant[test.procid]
+ self._try_send_new_subtest(test, gen)
+
+ def _try_send_new_subtest(self, test, variants_gen):
+ for variant, flags, suffix in variants_gen:
+ subtest = self._create_subtest(test, '%s-%s' % (variant, suffix),
+ variant=variant, flags=flags)
+ self._send_test(subtest)
+ return
+
+ del self._next_variant[test.procid]
+ self._send_result(test, None)
+
+ def _variants_gen(self, test):
+ """Generator producing (variant, flags, procid suffix) tuples."""
+ return self._get_variants_gen(test).gen(test)
+
+ def _get_variants_gen(self, test):
+ key = test.suite.name
+ variants_gen = self._variant_gens.get(key)
+ if not variants_gen:
+ variants_gen = test.suite.get_variants_gen(self._variants)
+ self._variant_gens[key] = variants_gen
+ return variants_gen
diff --git a/deps/v8/tools/turbolizer/code-view.js b/deps/v8/tools/turbolizer/code-view.js
index 8165210c31..7f9728a83a 100644
--- a/deps/v8/tools/turbolizer/code-view.js
+++ b/deps/v8/tools/turbolizer/code-view.js
@@ -164,8 +164,6 @@ class CodeView extends View {
}
}
}
-
- view.resizeToParent();
}
deleteContent() {}
diff --git a/deps/v8/tools/turbolizer/graph-view.js b/deps/v8/tools/turbolizer/graph-view.js
index 8de050f3e6..cdbc40c8f0 100644
--- a/deps/v8/tools/turbolizer/graph-view.js
+++ b/deps/v8/tools/turbolizer/graph-view.js
@@ -706,6 +706,7 @@ class GraphView extends View {
.on("mousedown", function(d){
graph.pathMouseDown.call(graph, d3.select(this), d);
})
+ .attr("adjacentToHover", "false");
// Set the correct styles on all of the paths
visibleEdges.classed('value', function(e) {
@@ -740,7 +741,8 @@ class GraphView extends View {
var newGs = graph.visibleNodes.enter()
.append("g");
- newGs.classed("control", function(n) { return n.isControl(); })
+ newGs.classed("turbonode", function(n) { return true; })
+ .classed("control", function(n) { return n.isControl(); })
.classed("live", function(n) { return n.isLive(); })
.classed("dead", function(n) { return !n.isLive(); })
.classed("javascript", function(n) { return n.isJavaScript(); })
@@ -754,6 +756,34 @@ class GraphView extends View {
.on("mouseup", function(d){
graph.nodeMouseUp.call(graph, d3.select(this), d);
})
+ .on('mouseover', function(d){
+ var nodeSelection = d3.select(this);
+ let node = graph.nodeMap[d.id];
+ let adjInputEdges = graph.visibleEdges.filter(e => { return e.target === node; });
+ let adjOutputEdges = graph.visibleEdges.filter(e => { return e.source === node; });
+ adjInputEdges.attr('relToHover', "input");
+ adjOutputEdges.attr('relToHover', "output");
+ let adjInputNodes = adjInputEdges.data().map(e => e.source);
+ graph.visibleNodes.data(adjInputNodes, function(d) {
+ return d.id;
+ }).attr('relToHover', "input");
+ let adjOutputNodes = adjOutputEdges.data().map(e => e.target);
+ graph.visibleNodes.data(adjOutputNodes, function(d) {
+ return d.id;
+ }).attr('relToHover', "output");
+ graph.updateGraphVisibility();
+ })
+ .on('mouseout', function(d){
+ var nodeSelection = d3.select(this);
+ let node = graph.nodeMap[d.id];
+ let adjEdges = graph.visibleEdges.filter(e => { return e.target === node || e.source === node; });
+ adjEdges.attr('relToHover', "none");
+ let adjNodes = adjEdges.data().map(e => e.target).concat(adjEdges.data().map(e => e.source));
+ let nodes = graph.visibleNodes.data(adjNodes, function(d) {
+ return d.id;
+ }).attr('relToHover', "none");
+ graph.updateGraphVisibility();
+ })
.call(graph.drag);
newGs.append("rect")
diff --git a/deps/v8/tools/turbolizer/index.html b/deps/v8/tools/turbolizer/index.html
index 4066fd8010..552e83783a 100644
--- a/deps/v8/tools/turbolizer/index.html
+++ b/deps/v8/tools/turbolizer/index.html
@@ -4,13 +4,14 @@
<title>Turbolizer</title>
<link rel="stylesheet" href="turbo-visualizer.css" />
</head>
- <body width="100%">
+ <body>
<div id="left">
<div id='source-text'>
<pre id='source-text-pre'\>
</div>
</div>
- <div id="middle">
+ <div class="resizer-left"></div>
+ <div id="middle" class="resizable-pane">
<div id="graph-toolbox-anchor">
<span id="graph-toolbox">
<input id="layout" type="image" title="layout graph" src="layout-icon.png"
@@ -47,12 +48,13 @@
<pre id="schedule-text-pre" class='prettyprint prettyprinted'>
<ul id="schedule-list" class='nolinenums noindent'>
</ul>
- </pre>
+ </pre>
</div>
<div id='text-placeholder' width="0px" height="0px" style="position: absolute; top:100000px;" ><svg><text text-anchor="right">
<tspan white-space="inherit" id="text-measure"/>
</text></svg></div>
</div>
+ <div class="resizer-right"></div>
<div id="right">
<div id='disassembly'>
<pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
@@ -63,15 +65,15 @@
</div>
<div id="source-collapse" class="collapse-pane">
<input id="source-expand" type="image" title="show source"
- src="right-arrow.png" class="button-input-invisible">
+ src="right-arrow.png" class="button-input invisible">
<input id="source-shrink" type="image" title="hide source"
src="left-arrow.png" class="button-input">
</div>
<div id="disassembly-collapse" class="collapse-pane">
<input id="disassembly-expand" type="image" title="show disassembly"
- src="left-arrow.png" class="button-input">
- <input id="disassembly-shrink" type="image" title="hide disassembly"
- src="right-arrow.png" class="button-input-invisible">
+ src="left-arrow.png" class="button-input invisible">
+ <input id="disassembly-shrink" type="image" title="hide disassembly"
+ src="right-arrow.png" class="button-input">
</div>
<script src="https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js"></script>
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
index 69a6ccabb5..7fd9c4852a 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -41,10 +41,8 @@
outline: none;
}
-.button-input-invisible {
- vertical-align: middle;
- width: 0px;
- visibility: hidden;
+.invisible {
+ display: none;
}
@@ -57,9 +55,12 @@
!important
}
+
body {
margin: 0;
padding: 0;
+ height: 100vh;
+ width: 100vw;
overflow:hidden;
-webkit-touch-callout: none;
-webkit-user-select: none;
@@ -69,8 +70,8 @@ body {
user-select: none;
}
-p {
- text-align: center;
+p {
+ text-align: center;
overflow: overlay;
position: relative;
}
@@ -97,21 +98,47 @@ div.scrollable {
  overflow-y: scroll; overflow-x: hidden;
}
-g.control rect {
+g.turbonode[relToHover="input"] rect {
+ stroke: #67e62c;
+ stroke-width: 16px;
+}
+
+g.turbonode[relToHover="output"] rect {
+ stroke: #d23b14;
+ stroke-width: 16px;
+}
+
+path[relToHover="input"] {
+ stroke: #67e62c;
+ stroke-width: 16px;
+}
+
+path[relToHover="output"] {
+ stroke: #d23b14;
+ stroke-width: 16px;
+}
+
+
+g.turbonode:hover rect {
+ stroke: #000000;
+ stroke-width: 7px;
+}
+
+g.control rect {
fill: #EFCC00;
stroke: #080808;
stroke-width: 5px;
}
-g.javascript rect {
+g.javascript rect {
fill: #DD7E6B;
}
-g.simplified rect {
+g.simplified rect {
fill: #3C78D8;
}
-g.machine rect {
+g.machine rect {
fill: #6AA84F;
}
@@ -156,47 +183,14 @@ circle.halfFilledBubbleStyle:hover {
stroke-width: 3px;
}
-path.effect {
+path {
fill: none;
stroke: #080808;
stroke-width: 4px;
cursor: default;
}
-path.effect:hover {
- stroke-width: 6px;
-}
-
-path.control {
- fill: none;
- stroke: #080808;
- stroke-width: 4px;
- cursor: default;
-}
-
-path.control:hover {
- stroke-width: 6px;
-}
-
-path.value {
- fill: none;
- stroke: #888888;
- stroke-width: 4px;
- cursor: default;
-}
-
-path.value:hover {
- stroke-width: 6px;
-}
-
-path.frame-state {
- fill: none;
- stroke: #080808;
- stroke-width: 4px;
- cursor: default;
-}
-
-path.frame-state:hover{
+path:hover {
stroke-width: 6px;
}
@@ -246,33 +240,20 @@ span.linkable-text:hover {
font-weight: bold;
}
+
#left {
float: left; height: 100%; background-color: #FFFFFF;
- -webkit-transition: all 1s ease-in-out;
- -moz-transition: all 1s ease-in-out;
- -o-transition: all 1s ease-in-out;
- transition: all .3s ease-in-out;
- transition-property: width;
}
#middle {
- float:left; height: 100%; background-color: #F8F8F8;
- -webkit-transition: all 1s ease-in-out;
- -moz-transition: all 1s ease-in-out;
- -o-transition: all 1s ease-in-out;
- transition: all .3s ease-in-out;
- transition-property: width;
+ float:left; height: 100%; background-color: #F8F8F8;
}
#right {
- float: right; background-color: #FFFFFF;
- -webkit-transition: all 1s ease-in-out;
- -moz-transition: all 1s ease-in-out;
- -o-transition: all 1s ease-in-out;
- transition: all .3s ease-in-out;
- transition-property: width;
+ float: right; background-color: #FFFFFF;
}
+
#disassembly-collapse {
right: 0;
}
@@ -288,7 +269,7 @@ span.linkable-text:hover {
#graph-toolbox {
position: relative;
top: 1em;
- left: 0.7em;
+ left: 25px;
border: 2px solid #eee8d5;
border-radius: 5px;
padding: 0.7em;
@@ -337,4 +318,44 @@ tspan {
text {
dominant-baseline: text-before-edge;
+}
+
+.resizer-left {
+ position:absolute;
+ width: 4px;
+ height:100%;
+ background: #a0a0a0;
+ cursor: pointer;
+}
+
+.resizer-left.snapped {
+ width: 12px;
+}
+
+.resizer-left:hover {
+ background: orange;
+}
+
+.resizer-left.dragged {
+ background: orange;
+}
+
+.resizer-right {
+ position:absolute;
+ width: 4px;
+ height:100%;
+ background: #a0a0a0;
+ cursor: pointer;
+}
+
+.resizer-right.snapped {
+ width: 12px;
+}
+
+.resizer-right:hover {
+ background: orange;
+}
+
+.resizer-right.dragged {
+ background: orange;
} \ No newline at end of file
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.js b/deps/v8/tools/turbolizer/turbo-visualizer.js
index 280caf01db..c04384810b 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.js
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.js
@@ -1,99 +1,191 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-document.onload = (function(d3){
- "use strict";
- var jsonObj;
- var sourceExpandClassList = document.getElementById(SOURCE_EXPAND_ID).classList;
- var sourceCollapseClassList = document.getElementById(SOURCE_COLLAPSE_ID).classList;
- var sourceExpanded = sourceCollapseClassList.contains(COLLAPSE_PANE_BUTTON_VISIBLE);
- var disassemblyExpandClassList = document.getElementById(DISASSEMBLY_EXPAND_ID).classList;
- var disassemblyCollapseClassList = document.getElementById(DISASSEMBLY_COLLAPSE_ID).classList;
- var disassemblyExpanded = disassemblyCollapseClassList.contains(COLLAPSE_PANE_BUTTON_VISIBLE);
- var svg = null;
- var graph = null;
- var schedule = null;
- var empty = null;
- var currentPhaseView = null;
- var disassemblyView = null;
- var sourceView = null;
- var selectionBroker = null;
+class Snapper {
- function updatePanes() {
- if (sourceExpanded) {
- if (disassemblyExpanded) {
- d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "30%");
- d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "40%");
- d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "30%");
- } else {
- d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "50%");
- d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "50%");
- d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "0%");
- }
- } else {
- if (disassemblyExpanded) {
- d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "0%");
- d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "50%");
- d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "50%");
- } else {
- d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "0%");
- d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "100%");
- d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "0%");
- }
- }
+ constructor(resizer) {
+ let snapper = this;
+ snapper.resizer = resizer;
+ snapper.sourceExpand = d3.select("#" + SOURCE_EXPAND_ID);
+ snapper.sourceCollapse = d3.select("#" + SOURCE_COLLAPSE_ID);
+ snapper.disassemblyExpand = d3.select("#" + DISASSEMBLY_EXPAND_ID);
+ snapper.disassemblyCollapse = d3.select("#" + DISASSEMBLY_COLLAPSE_ID);
+
+ d3.select("#source-collapse").on("click", function(){
+ resizer.snapper.toggleSourceExpanded();
+ });
+ d3.select("#disassembly-collapse").on("click", function(){
+ resizer.snapper.toggleDisassemblyExpanded();
+ });
}
- function getLastExpandedState(type, default_state) {
+ getLastExpandedState(type, default_state) {
var state = window.sessionStorage.getItem("expandedState-"+type);
if (state === null) return default_state;
return state === 'true';
}
- function setLastExpandedState(type, state) {
+ setLastExpandedState(type, state) {
window.sessionStorage.setItem("expandedState-"+type, state);
}
- function toggleSourceExpanded() {
- setSourceExpanded(!sourceExpanded);
+ toggleSourceExpanded() {
+ this.setSourceExpanded(!this.sourceExpand.classed("invisible"));
+ }
+
+ sourceExpandUpdate(newState) {
+ this.setLastExpandedState("source", newState);
+ this.sourceExpand.classed("invisible", newState);
+ this.sourceCollapse.classed("invisible", !newState);
}
- function setSourceExpanded(newState) {
- sourceExpanded = newState;
- setLastExpandedState("source", newState);
- updatePanes();
+ setSourceExpanded(newState) {
+ if (this.sourceExpand.classed("invisible") === newState) return;
+ this.sourceExpandUpdate(newState);
+ let resizer = this.resizer;
if (newState) {
- sourceCollapseClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
- sourceCollapseClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
- sourceExpandClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
- sourceExpandClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
+ resizer.sep_left = resizer.sep_left_snap;
+ resizer.sep_left_snap = 0;
} else {
- sourceCollapseClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
- sourceCollapseClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
- sourceExpandClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
- sourceExpandClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ resizer.sep_left_snap = resizer.sep_left;
+ resizer.sep_left = 0;
}
+ resizer.updatePanes();
}
- function toggleDisassemblyExpanded() {
- setDisassemblyExpanded(!disassemblyExpanded);
+ toggleDisassemblyExpanded() {
+ this.setDisassemblyExpanded(!this.disassemblyExpand.classed("invisible"));
}
- function setDisassemblyExpanded(newState) {
- disassemblyExpanded = newState;
- setLastExpandedState("disassembly", newState);
- updatePanes();
+ disassemblyExpandUpdate(newState) {
+ this.setLastExpandedState("disassembly", newState);
+ this.disassemblyExpand.classed("invisible", newState);
+ this.disassemblyCollapse.classed("invisible", !newState);
+ }
+
+ setDisassemblyExpanded(newState) {
+ if (this.disassemblyExpand.classed("invisible") === newState) return;
+ this.disassemblyExpandUpdate(newState);
+ let resizer = this.resizer;
if (newState) {
- disassemblyCollapseClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
- disassemblyCollapseClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
- disassemblyExpandClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
- disassemblyExpandClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
+ resizer.sep_right = resizer.sep_right_snap;
+ resizer.sep_right_snap = resizer.client_width;
+ console.log("set expand")
} else {
- disassemblyCollapseClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
- disassemblyCollapseClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
- disassemblyExpandClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
- disassemblyExpandClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ resizer.sep_right_snap = resizer.sep_right;
+ resizer.sep_right = resizer.client_width;
+ console.log("set collapse")
}
+ resizer.updatePanes();
+ }
+
+  panesUpdated() {
+ this.sourceExpandUpdate(this.resizer.sep_left > this.resizer.dead_width);
+ this.disassemblyExpandUpdate(this.resizer.sep_right <
+ (this.resizer.client_width - this.resizer.dead_width));
+ }
+}
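+
+// How the snap protocol above works: collapsing a pane saves its separator
+// position into sep_left_snap / sep_right_snap and moves the separator to the
+// pane's edge (0 for the source pane, client_width for the disassembly pane);
+// expanding swaps the saved position back. For example, with sep_left at 300,
+// a collapse stores 300 and sets sep_left to 0; the next expand restores 300.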
+
+class Resizer {
+ constructor(panes_updated_callback, dead_width) {
+ let resizer = this;
+    resizer.snapper = new Snapper(resizer);
+    resizer.panes_updated_callback = panes_updated_callback;
+    resizer.dead_width = dead_width;
+ resizer.client_width = d3.select("body").node().getBoundingClientRect().width;
+ resizer.left = d3.select("#" + SOURCE_PANE_ID);
+ resizer.middle = d3.select("#" + INTERMEDIATE_PANE_ID);
+ resizer.right = d3.select("#" + GENERATED_PANE_ID);
+ resizer.resizer_left = d3.select('.resizer-left');
+ resizer.resizer_right = d3.select('.resizer-right');
+ resizer.sep_left = resizer.client_width/3;
+ resizer.sep_right = resizer.client_width/3*2;
+ resizer.sep_left_snap = 0;
+ resizer.sep_right_snap = 0;
+ // Offset to prevent resizers from sliding slightly over one another.
+ resizer.sep_width_offset = 7;
+
+ let dragResizeLeft = d3.behavior.drag()
+ .on('drag', function() {
+ let x = d3.mouse(this.parentElement)[0];
+ resizer.sep_left = Math.min(Math.max(0,x), resizer.sep_right-resizer.sep_width_offset);
+ resizer.updatePanes();
+ })
+ .on('dragstart', function() {
+ resizer.resizer_left.classed("dragged", true);
+ let x = d3.mouse(this.parentElement)[0];
+ if (x > dead_width) {
+ resizer.sep_left_snap = resizer.sep_left;
+ }
+ })
+ .on('dragend', function() {
+ resizer.resizer_left.classed("dragged", false);
+ });
+ resizer.resizer_left.call(dragResizeLeft);
+
+ let dragResizeRight = d3.behavior.drag()
+ .on('drag', function() {
+ let x = d3.mouse(this.parentElement)[0];
+ resizer.sep_right = Math.max(resizer.sep_left+resizer.sep_width_offset, Math.min(x, resizer.client_width));
+ resizer.updatePanes();
+ })
+ .on('dragstart', function() {
+ resizer.resizer_right.classed("dragged", true);
+ let x = d3.mouse(this.parentElement)[0];
+ if (x < (resizer.client_width-dead_width)) {
+ resizer.sep_right_snap = resizer.sep_right;
+ }
+ })
+ .on('dragend', function() {
+ resizer.resizer_right.classed("dragged", false);
+      });
+ resizer.resizer_right.call(dragResizeRight);
+ window.onresize = function(){
+ resizer.updateWidths();
+ /*fitPanesToParents();*/
+ resizer.updatePanes();
+ };
+ }
+
+ updatePanes() {
+ let left_snapped = this.sep_left === 0;
+ let right_snapped = this.sep_right >= this.client_width - 1;
+ this.resizer_left.classed("snapped", left_snapped);
+ this.resizer_right.classed("snapped", right_snapped);
+ this.left.style('width', this.sep_left + 'px');
+ this.middle.style('width', (this.sep_right-this.sep_left) + 'px');
+ this.right.style('width', (this.client_width - this.sep_right) + 'px');
+ this.resizer_left.style('left', this.sep_left + 'px');
+ this.resizer_right.style('right', (this.client_width - this.sep_right - 1) + 'px');
+
+    this.snapper.panesUpdated();
+ this.panes_updated_callback();
+ }
+
+ updateWidths() {
+ this.client_width = d3.select("body").node().getBoundingClientRect().width;
+ this.sep_right = Math.min(this.sep_right, this.client_width);
+ this.sep_left = Math.min(Math.max(0, this.sep_left), this.sep_right);
+ }
+}
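+
+// Note the geometry invariant kept by updateWidths()/updatePanes():
+// 0 <= sep_left <= sep_right <= client_width. E.g. a 900px-wide body starts
+// with sep_left = 300 and sep_right = 600 (three equal panes), and dragging
+// keeps the separators at least sep_width_offset (7px) apart.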
+
+document.onload = (function(d3){
+ "use strict";
+ var jsonObj;
+ var svg = null;
+ var graph = null;
+ var schedule = null;
+ var empty = null;
+ var currentPhaseView = null;
+ var disassemblyView = null;
+ var sourceView = null;
+ var selectionBroker = null;
+ let resizer = new Resizer(panesUpdatedCallback, 100);
+
+ function panesUpdatedCallback() {
+ graph.fitGraphViewToWindow();
}
function hideCurrentPhase() {
@@ -128,8 +220,6 @@ document.onload = (function(d3){
d3.select("#right").classed("scrollable", false);
graph.fitGraphViewToWindow();
- disassemblyView.resizeToParent();
- sourceView.resizeToParent();
d3.select("#left").classed("scrollable", true);
d3.select("#right").classed("scrollable", true);
@@ -138,21 +228,6 @@ document.onload = (function(d3){
selectionBroker = new SelectionBroker();
function initializeHandlers(g) {
- d3.select("#source-collapse").on("click", function(){
- toggleSourceExpanded(true);
- setTimeout(function(){
- g.fitGraphViewToWindow();
- }, 300);
- });
- d3.select("#disassembly-collapse").on("click", function(){
- toggleDisassemblyExpanded();
- setTimeout(function(){
- g.fitGraphViewToWindow();
- }, 300);
- });
- window.onresize = function(){
- fitPanesToParents();
- };
d3.select("#hidden-file-upload").on("change", function() {
if (window.File && window.FileReader && window.FileList) {
var uploadFile = this.files[0];
@@ -238,9 +313,11 @@ document.onload = (function(d3){
initializeHandlers(graph);
- setSourceExpanded(getLastExpandedState("source", true));
- setDisassemblyExpanded(getLastExpandedState("disassembly", false));
+ resizer.snapper.setSourceExpanded(resizer.snapper.getLastExpandedState("source", true));
+ resizer.snapper.setDisassemblyExpanded(resizer.snapper.getLastExpandedState("disassembly", false));
displayPhaseView(empty, null);
fitPanesToParents();
+ resizer.updatePanes();
+
})(window.d3);
diff --git a/deps/v8/tools/turbolizer/view.js b/deps/v8/tools/turbolizer/view.js
index 1ce1056a7f..a7c1f1e417 100644
--- a/deps/v8/tools/turbolizer/view.js
+++ b/deps/v8/tools/turbolizer/view.js
@@ -18,21 +18,9 @@ class View {
show(data, rememberedSelection) {
this.parentNode.appendChild(this.divElement[0][0]);
this.initializeContent(data, rememberedSelection);
- this.resizeToParent();
this.divElement.attr(VISIBILITY, 'visible');
}
- resizeToParent() {
- var view = this;
- var documentElement = document.documentElement;
- var y;
- if (this.parentNode.clientHeight)
- y = Math.max(this.parentNode.clientHeight, documentElement.clientHeight);
- else
- y = documentElement.clientHeight;
- this.parentNode.style.height = y + 'px';
- }
-
hide() {
this.divElement.attr(VISIBILITY, 'hidden');
this.deleteContent();
diff --git a/deps/v8/tools/unittests/PRESUBMIT.py b/deps/v8/tools/unittests/PRESUBMIT.py
new file mode 100644
index 0000000000..d428813e13
--- /dev/null
+++ b/deps/v8/tools/unittests/PRESUBMIT.py
@@ -0,0 +1,9 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+def CheckChangeOnCommit(input_api, output_api):
+ # TODO(machenbach): Run all unittests.
+ tests = input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api, output_api, '.', whitelist=['run_tests_test.py$'])
+ return input_api.RunTests(tests)
diff --git a/deps/v8/tools/unittests/predictable_wrapper_test.py b/deps/v8/tools/unittests/predictable_wrapper_test.py
new file mode 100755
index 0000000000..c085fb8879
--- /dev/null
+++ b/deps/v8/tools/unittests/predictable_wrapper_test.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+import tempfile
+import unittest
+
+TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+PREDICTABLE_WRAPPER = os.path.join(
+ TOOLS_DIR, 'predictable_wrapper.py')
+
+PREDICTABLE_MOCKED = os.path.join(
+ TOOLS_DIR, 'unittests', 'testdata', 'predictable_mocked.py')
+
+def call_wrapper(mode):
+ """Call the predictable wrapper under test with a mocked file to test.
+
+ Instead of d8, we use python and a python mock script. This mock script is
+ expecting two arguments, mode (one of 'equal', 'differ' or 'missing') and
+ a path to a temporary file for simulating non-determinism.
+ """
+ fd, state_file = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ args = [
+ sys.executable,
+ PREDICTABLE_WRAPPER,
+ sys.executable,
+ PREDICTABLE_MOCKED,
+ mode,
+ state_file,
+ ]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ proc.communicate()
+ return proc.returncode
+ finally:
+ os.unlink(state_file)
+
+
+class PredictableTest(unittest.TestCase):
+ def testEqualAllocationOutput(self):
+ self.assertEqual(0, call_wrapper('equal'))
+
+ def testNoAllocationOutput(self):
+ self.assertEqual(2, call_wrapper('missing'))
+
+ def testDifferentAllocationOutput(self):
+ self.assertEqual(3, call_wrapper('differ'))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index fd1e36531a..07dd515406 100644..100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -94,8 +94,8 @@ class PerfTest(unittest.TestCase):
include=([os.path.join(cls.base, "run_perf.py")]))
cls._cov.start()
import run_perf
- from testrunner.local import commands
- global commands
+ from testrunner.local import command
+ global command
global run_perf
@classmethod
@@ -125,9 +125,14 @@ class PerfTest(unittest.TestCase):
stderr=None,
timed_out=kwargs.get("timed_out", False))
for arg in args[1]]
- def execute(*args, **kwargs):
- return test_outputs.pop()
- commands.Execute = MagicMock(side_effect=execute)
+ def create_cmd(*args, **kwargs):
+ cmd = MagicMock()
+ def execute(*args, **kwargs):
+ return test_outputs.pop()
+ cmd.execute = MagicMock(side_effect=execute)
+ return cmd
+
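+    # Each Command(...) call returns a fresh mock whose execute() pops the
+    # next canned output, mirroring the new API where constructing a command
+    # and executing it are separate steps.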
+ command.Command = MagicMock(side_effect=create_cmd)
# Check that d8 is called from the correct cwd for each test run.
dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
@@ -164,18 +169,23 @@ class PerfTest(unittest.TestCase):
self.assertEquals(errors, self._LoadResults()["errors"])
def _VerifyMock(self, binary, *args, **kwargs):
- arg = [path.join(path.dirname(self.base), binary)]
- arg += args
- commands.Execute.assert_called_with(
- arg, timeout=kwargs.get("timeout", 60))
+ shell = path.join(path.dirname(self.base), binary)
+ command.Command.assert_called_with(
+ cmd_prefix=[],
+ shell=shell,
+ args=list(args),
+ timeout=kwargs.get('timeout', 60))
def _VerifyMockMultiple(self, *args, **kwargs):
- expected = []
- for arg in args:
- a = [path.join(path.dirname(self.base), arg[0])]
- a += arg[1:]
- expected.append(((a,), {"timeout": kwargs.get("timeout", 60)}))
- self.assertEquals(expected, commands.Execute.call_args_list)
+ self.assertEquals(len(args), len(command.Command.call_args_list))
+ for arg, actual in zip(args, command.Command.call_args_list):
+ expected = {
+ 'cmd_prefix': [],
+ 'shell': path.join(path.dirname(self.base), arg[0]),
+ 'args': list(arg[1:]),
+ 'timeout': kwargs.get('timeout', 60)
+ }
+      self.assertEquals((expected,), actual)
def testOneRun(self):
self._WriteTestInput(V8_JSON)
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
new file mode 100755
index 0000000000..f4ff3fe1f7
--- /dev/null
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -0,0 +1,667 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Global system tests for V8 test runners and fuzzers.
+
+This hooks up the framework under tools/testrunner and exercises high-level
+scenarios with different test suite extensions and build configurations.
+"""
+
+# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
+# independent.
+# TODO(machenbach): Move coverage recording to a global test entry point to
+# include other unittest suites in the coverage report.
+# TODO(machenbach): Coverage data from multiprocessing doesn't work.
+# TODO(majeski): Add some tests for the fuzzers.
+
+import collections
+import contextlib
+import json
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import unittest
+
+from cStringIO import StringIO
+
+TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
+RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')
+
+Result = collections.namedtuple(
+ 'Result', ['stdout', 'stderr', 'returncode'])
+
+Result.__str__ = lambda self: (
+ '\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
+ (self.returncode, self.stdout, self.stderr))
+
+
+@contextlib.contextmanager
+def temp_dir():
+ """Wrapper making a temporary directory available."""
+ path = None
+ try:
+ path = tempfile.mkdtemp('v8_test_')
+ yield path
+ finally:
+ if path:
+ shutil.rmtree(path)
+
+
+@contextlib.contextmanager
+def temp_base(baseroot='testroot1'):
+ """Wrapper that sets up a temporary V8 test root.
+
+ Args:
+ baseroot: The folder with the test root blueprint. Relevant files will be
+ copied to the temporary test root, to guarantee a fresh setup with no
+ dirty state.
+ """
+ basedir = os.path.join(TEST_DATA_ROOT, baseroot)
+ with temp_dir() as tempbase:
+ builddir = os.path.join(tempbase, 'out', 'Release')
+ testroot = os.path.join(tempbase, 'test')
+ os.makedirs(builddir)
+ shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
+ shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)
+
+ for suite in os.listdir(os.path.join(basedir, 'test')):
+ os.makedirs(os.path.join(testroot, suite))
+ for entry in os.listdir(os.path.join(basedir, 'test', suite)):
+ shutil.copy(
+ os.path.join(basedir, 'test', suite, entry),
+ os.path.join(testroot, suite))
+ yield tempbase
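+    # The resulting layout mirrors a real checkout, roughly:
+    #   <tempbase>/out/Release/{v8_build_config.json, d8_mocked.py}
+    #   <tempbase>/test/<suite>/{testcfg.py, <suite>.status, ...}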
+
+
+@contextlib.contextmanager
+def capture():
+ """Wrapper that replaces system stdout/stderr an provides the streams."""
+ oldout = sys.stdout
+ olderr = sys.stderr
+ try:
+    stdout = StringIO()
+    stderr = StringIO()
+ sys.stdout = stdout
+ sys.stderr = stderr
+ yield stdout, stderr
+ finally:
+ sys.stdout = oldout
+ sys.stderr = olderr
+
+
+def run_tests(basedir, *args, **kwargs):
+ """Executes the test runner with captured output."""
+ with capture() as (stdout, stderr):
+ sys_args = ['--command-prefix', sys.executable] + list(args)
+ if kwargs.get('infra_staging', False):
+ sys_args.append('--infra-staging')
+ code = standard_runner.StandardTestRunner(
+ basedir=basedir).execute(sys_args)
+ return Result(stdout.getvalue(), stderr.getvalue(), code)
+
+
+def override_build_config(basedir, **kwargs):
+ """Override the build config with new values provided as kwargs."""
+ path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json')
+ with open(path) as f:
+ config = json.load(f)
+ config.update(kwargs)
+ with open(path, 'w') as f:
+ json.dump(config, f)
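+  # E.g. override_build_config(basedir, is_debug=True) flips the faked build
+  # to debug, which testInconsistentMode below relies on.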
+
+
+class SystemTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ # Try to set up python coverage and run without it if not available.
+ cls._cov = None
+ try:
+ import coverage
+ if int(coverage.__version__.split('.')[0]) < 4:
+ cls._cov = None
+ print 'Python coverage version >= 4 required.'
+ raise ImportError()
+ cls._cov = coverage.Coverage(
+ source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
+ omit=['*unittest*', '*__init__.py'],
+ )
+ cls._cov.exclude('raise NotImplementedError')
+ cls._cov.exclude('if __name__ == .__main__.:')
+ cls._cov.exclude('except TestRunnerError:')
+ cls._cov.exclude('except KeyboardInterrupt:')
+ cls._cov.exclude('if options.verbose:')
+ cls._cov.exclude('if verbose:')
+ cls._cov.exclude('pass')
+ cls._cov.exclude('assert False')
+ cls._cov.start()
+ except ImportError:
+ print 'Running without python coverage.'
+ sys.path.append(TOOLS_ROOT)
+ global standard_runner
+ from testrunner import standard_runner
+ from testrunner.local import pool
+ pool.setup_testing()
+
+ @classmethod
+ def tearDownClass(cls):
+ if cls._cov:
+ cls._cov.stop()
+ print ''
+ print cls._cov.report(show_missing=True)
+
+ def testPass(self):
+ """Test running only passing tests in two variants.
+
+ Also test printing durations.
+ """
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ '--time',
+ 'sweet/bananas',
+ 'sweet/raspberries',
+ )
+ self.assertIn('Running 4 tests', result.stdout, result)
+ self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
+ self.assertIn('Total time:', result.stderr, result)
+ self.assertIn('sweet/bananas', result.stderr, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testShardedProc(self):
+ with temp_base() as basedir:
+ for shard in [1, 2]:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ '--shard-count=2',
+ '--shard-run=%d' % shard,
+ 'sweet/bananas',
+ 'sweet/raspberries',
+ infra_staging=True,
+ )
+ # One of the shards gets one variant of each test.
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
+ if shard == 1:
+ self.assertIn('Done running sweet/bananas', result.stdout, result)
+ else:
+ self.assertIn('Done running sweet/raspberries', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testSharded(self):
+ """Test running a particular shard."""
+ with temp_base() as basedir:
+ for shard in [1, 2]:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ '--shard-count=2',
+ '--shard-run=%d' % shard,
+ 'sweet/bananas',
+ 'sweet/raspberries',
+ )
+ # One of the shards gets one variant of each test.
+ self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('Done running sweet/bananas', result.stdout, result)
+ self.assertIn('Done running sweet/raspberries', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testFailProc(self):
+ self.testFail(infra_staging=True)
+
+ def testFail(self, infra_staging=False):
+ """Test running only failing tests in two variants."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ 'sweet/strawberries',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 2 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
+ self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def check_cleaned_json_output(self, expected_results_name, actual_json):
+ # Check relevant properties of the json output.
+ with open(actual_json) as f:
+ json_output = json.load(f)[0]
+ pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
+
+    # Replace duration in actual output as it's non-deterministic. Also
+    # replace the python executable prefix, as its absolute path depends on
+    # where this runs.
+ def replace_variable_data(data):
+ data['duration'] = 1
+ data['command'] = ' '.join(
+ ['/usr/bin/python'] + data['command'].split()[1:])
+ for data in json_output['slowest_tests']:
+ replace_variable_data(data)
+ for data in json_output['results']:
+ replace_variable_data(data)
+ json_output['duration_mean'] = 1
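+    # After normalization every command in the output reads like:
+    #   /usr/bin/python out/Release/d8_mocked.py --random-seed=123 <test> --nohard-abort
+    # matching the expected_test_results*.json fixtures.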
+
+ with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
+ expected_test_results = json.load(f)
+
+ msg = None # Set to pretty_json for bootstrapping.
+ self.assertDictEqual(json_output, expected_test_results, msg)
+
+ def testFailWithRerunAndJSONProc(self):
+ self.testFailWithRerunAndJSON(infra_staging=True)
+
+ def testFailWithRerunAndJSON(self, infra_staging=False):
+ """Test re-running a failing test and output to json."""
+ with temp_base() as basedir:
+ json_path = os.path.join(basedir, 'out.json')
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ '--rerun-failures-count=2',
+ '--random-seed=123',
+ '--json-test-results', json_path,
+ 'sweet/strawberries',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 1 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('1 tests ran', result.stdout, result)
+ self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
+ if not infra_staging:
+ # We run one test, which fails and gets re-run twice.
+ self.assertIn('3 tests failed', result.stdout, result)
+ else:
+ # With test processors we don't count reruns as separated failures.
+ # TODO(majeski): fix it?
+ self.assertIn('1 tests failed', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ # TODO(majeski): Previously we only reported the variant flags in the
+ # flags field of the test result.
+ # After recent changes we report all flags, including the file names.
+ # This is redundant to the command. Needs investigation.
+ self.check_cleaned_json_output('expected_test_results1.json', json_path)
+
+ def testFlakeWithRerunAndJSONProc(self):
+ self.testFlakeWithRerunAndJSON(infra_staging=True)
+
+ def testFlakeWithRerunAndJSON(self, infra_staging=False):
+ """Test re-running a failing test and output to json."""
+ with temp_base(baseroot='testroot2') as basedir:
+ json_path = os.path.join(basedir, 'out.json')
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ '--rerun-failures-count=2',
+ '--random-seed=123',
+ '--json-test-results', json_path,
+ 'sweet',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 1 tests', result.stdout, result)
+ self.assertIn(
+ 'Done running sweet/bananaflakes: FAIL', result.stdout, result)
+ self.assertIn('1 tests failed', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn(
+ 'Done running sweet/bananaflakes: pass', result.stdout, result)
+ self.assertIn('All tests succeeded', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+ self.check_cleaned_json_output('expected_test_results2.json', json_path)
+
+ def testAutoDetect(self):
+ """Fake a build with several auto-detected options.
+
+ Using all those options at once doesn't really make much sense. This is
+ merely for getting coverage.
+ """
+ with temp_base() as basedir:
+ override_build_config(
+ basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
+ is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
+ v8_enable_i18n_support=False, v8_target_cpu='x86',
+ v8_use_snapshot=False)
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ 'sweet/bananas',
+ )
+ expect_text = (
+ '>>> Autodetected:\n'
+ 'asan\n'
+ 'cfi_vptr\n'
+ 'dcheck_always_on\n'
+ 'msan\n'
+ 'no_i18n\n'
+ 'no_snap\n'
+ 'tsan\n'
+ 'ubsan_vptr\n'
+ '>>> Running tests for ia32.release')
+ self.assertIn(expect_text, result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+ # TODO(machenbach): Test some more implications of the auto-detected
+ # options, e.g. that the right env variables are set.
+
+ def testSkipsProc(self):
+ self.testSkips(infra_staging=True)
+
+ def testSkips(self, infra_staging=False):
+ """Test skipping tests in status file for a specific variant."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=nooptimization',
+ 'sweet/strawberries',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 0 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('0 tests ran', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testDefaultProc(self):
+ self.testDefault(infra_staging=True)
+
+ def testDefault(self, infra_staging=False):
+ """Test using default test suites, though no tests are run since they don't
+ exist in a test setting.
+ """
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Warning: no tests were run!', result.stdout, result)
+ else:
+ self.assertIn('Running 0 base tests', result.stdout, result)
+ self.assertIn('0 tests ran', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testNoBuildConfig(self):
+ """Test failing run when build config is not found."""
+ with temp_base() as basedir:
+ result = run_tests(basedir)
+ self.assertIn('Failed to load build config', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testGNOption(self):
+ """Test using gn option, but no gn build folder is found."""
+ with temp_base() as basedir:
+ # TODO(machenbach): This should fail gracefully.
+ with self.assertRaises(OSError):
+ run_tests(basedir, '--gn')
+
+ def testInconsistentMode(self):
+ """Test failing run when attempting to wrongly override the mode."""
+ with temp_base() as basedir:
+ override_build_config(basedir, is_debug=True)
+ result = run_tests(basedir, '--mode=Release')
+ self.assertIn('execution mode (release) for release is inconsistent '
+ 'with build config (debug)', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testInconsistentArch(self):
+ """Test failing run when attempting to wrongly override the arch."""
+ with temp_base() as basedir:
+ result = run_tests(basedir, '--mode=Release', '--arch=ia32')
+ self.assertIn(
+ '--arch value (ia32) inconsistent with build config (x64).',
+ result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testWrongVariant(self):
+ """Test using a bogus variant."""
+ with temp_base() as basedir:
+ result = run_tests(basedir, '--mode=Release', '--variants=meh')
+ self.assertEqual(1, result.returncode, result)
+
+ def testModeFromBuildConfig(self):
+ """Test auto-detection of mode from build config."""
+ with temp_base() as basedir:
+ result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas')
+ self.assertIn('Running tests for x64.release', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testReport(self):
+ """Test the report feature.
+
+ This also exercises various paths in statusfile logic.
+ """
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--variants=default',
+ 'sweet',
+ '--report',
+ )
+ self.assertIn(
+ '3 tests are expected to fail that we should fix',
+ result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testWarnUnusedRules(self):
+ """Test the unused-rules feature."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--variants=default,nooptimization',
+ 'sweet',
+ '--warn-unused',
+ )
+      self.assertIn('Unused rule: carrots', result.stdout, result)
+      self.assertIn('Unused rule: regress/', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testCatNoSources(self):
+ """Test printing sources, but the suite's tests have none available."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--variants=default',
+ 'sweet/bananas',
+ '--cat',
+ )
+ self.assertIn('begin source: sweet/bananas', result.stdout, result)
+ self.assertIn('(no source available)', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testPredictableProc(self):
+ self.testPredictable(infra_staging=True)
+
+ def testPredictable(self, infra_staging=False):
+ """Test running a test in verify-predictable mode.
+
+    The test will fail because of missing allocation output. We verify this,
+    and that the predictable flags are passed and printed after the failure.
+ """
+ with temp_base() as basedir:
+ override_build_config(basedir, v8_enable_verify_predictable=True)
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ 'sweet/bananas',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 1 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('1 tests ran', result.stdout, result)
+ self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
+ self.assertIn('Test had no allocation output', result.stdout, result)
+ self.assertIn('--predictable --verify_predictable', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testSlowArch(self):
+ """Test timeout factor manipulation on slow architecture."""
+ with temp_base() as basedir:
+ override_build_config(basedir, v8_target_cpu='arm64')
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ 'sweet/bananas',
+ )
+ # TODO(machenbach): We don't have a way for testing if the correct
+ # timeout was used.
+ self.assertEqual(0, result.returncode, result)
+
+ def testRandomSeedStressWithDefault(self):
+ """Test using random-seed-stress feature has the right number of tests."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ '--random-seed-stress-count=2',
+ 'sweet/bananas',
+ )
+ self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testRandomSeedStressWithSeed(self):
+ """Test using random-seed-stress feature passing a random seed."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ '--random-seed-stress-count=2',
+ '--random-seed=123',
+ 'sweet/strawberries',
+ )
+ self.assertIn('Running 2 tests', result.stdout, result)
+ # We use a failing test so that the command is printed and we can verify
+ # that the right random seed was passed.
+ self.assertIn('--random-seed=123', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testSpecificVariants(self):
+ """Test using NO_VARIANTS modifiers in status files skips the desire tests.
+
+ The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
+ But the status file applies a modifier to each skipping one of the
+ variants.
+ """
+ with temp_base() as basedir:
+ override_build_config(basedir, v8_use_snapshot=False)
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ 'sweet/bananas',
+ 'sweet/raspberries',
+ )
+ # Both tests are either marked as running in only default or only
+ # slow variant.
+ self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testStatusFilePresubmit(self):
+ """Test that the fake status file is well-formed."""
+ with temp_base() as basedir:
+ from testrunner.local import statusfile
+ self.assertTrue(statusfile.PresubmitCheck(
+ os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
+
+ def testDotsProgressProc(self):
+ self.testDotsProgress(infra_staging=True)
+
+ def testDotsProgress(self, infra_staging=False):
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=dots',
+ 'sweet/cherries',
+ 'sweet/bananas',
+ '--no-sorting', '-j1', # make results order deterministic
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 2 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 2 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
+ self.assertIn('F.', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testMonoProgressProc(self):
+ self._testCompactProgress('mono', True)
+
+ def testMonoProgress(self):
+ self._testCompactProgress('mono', False)
+
+ def testColorProgressProc(self):
+ self._testCompactProgress('color', True)
+
+ def testColorProgress(self):
+ self._testCompactProgress('color', False)
+
+ def _testCompactProgress(self, name, infra_staging):
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=%s' % name,
+ 'sweet/cherries',
+ 'sweet/bananas',
+ infra_staging=infra_staging,
+ )
+ if name == 'color':
+ expected = ('\033[34m% 100\033[0m|'
+ '\033[32m+ 1\033[0m|'
+ '\033[31m- 1\033[0m]: Done')
+ else:
+ expected = '% 100|+ 1|- 1]: Done'
+ self.assertIn(expected, result.stdout)
+ self.assertIn('sweet/cherries', result.stdout)
+ self.assertIn('sweet/bananas', result.stdout)
+ self.assertEqual(1, result.returncode, result)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
new file mode 100644
index 0000000000..172b87a5d6
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -0,0 +1,107 @@
+{
+ "arch": "x64",
+ "duration_mean": 1,
+ "mode": "release",
+ "results": [
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "stderr": "",
+ "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 2,
+ "stderr": "",
+ "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 3,
+ "stderr": "",
+ "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ }
+ ],
+ "slowest_tests": [
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "marked_slow": true,
+ "name": "sweet/strawberries"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "marked_slow": true,
+ "name": "sweet/strawberries"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "marked_slow": true,
+ "name": "sweet/strawberries"
+ }
+ ],
+ "test_total": 3
+}
+
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
new file mode 100644
index 0000000000..7fcfe47f71
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -0,0 +1,74 @@
+{
+ "arch": "x64",
+ "duration_mean": 1,
+ "mode": "release",
+ "results": [
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "bananaflakes",
+ "--nohard-abort"
+ ],
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "stderr": "",
+ "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "duration": 1,
+ "exit_code": 0,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "bananaflakes",
+ "--nohard-abort"
+ ],
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "PASS",
+ "run": 2,
+ "stderr": "",
+ "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ }
+ ],
+ "slowest_tests": [
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "bananaflakes",
+ "--nohard-abort"
+ ],
+ "marked_slow": false,
+ "name": "sweet/bananaflakes"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "bananaflakes",
+ "--nohard-abort"
+ ],
+ "marked_slow": false,
+ "name": "sweet/bananaflakes"
+ }
+ ],
+ "test_total": 2
+}
diff --git a/deps/v8/tools/unittests/testdata/predictable_mocked.py b/deps/v8/tools/unittests/testdata/predictable_mocked.py
new file mode 100644
index 0000000000..cc332c2c46
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/predictable_mocked.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+assert len(sys.argv) == 3
+
+if sys.argv[1] == 'equal':
+ # 1. Scenario: print equal allocation hashes.
+ print '### Allocations = 9497, hash = 0xc322c6b0'
+elif sys.argv[1] == 'differ':
+ # 2. Scenario: print different allocation hashes. This prints a different
+ # hash on the second run, based on the content of a semaphore file. This
+ # file is expected to be empty in the beginning.
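+  # First run: the file is empty, so hash ...b1 is printed and state is
+  # written; second run: the file is non-empty, so ...b0 is printed instead.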
+ with open(sys.argv[2]) as f:
+ if f.read():
+ print '### Allocations = 9497, hash = 0xc322c6b0'
+ else:
+ print '### Allocations = 9497, hash = 0xc322c6b1'
+ with open(sys.argv[2], 'w') as f:
+ f.write('something')
+else:
+ # 3. Scenario: missing allocation hashes. Don't print anything.
+  assert sys.argv[1] == 'missing'
+
+sys.exit(0)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py b/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py
new file mode 100644
index 0000000000..c7ca55a571
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py
@@ -0,0 +1,16 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy d8 replacement. Just passes all tests, except when 'berries' is in the args.
+"""
+
+import sys
+
+args = ' '.join(sys.argv[1:])
+print args
+# Let all berries fail.
+if 'berries' in args:
+ sys.exit(1)
+sys.exit(0)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
new file mode 100644
index 0000000000..74214631dc
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
@@ -0,0 +1,35 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+[ALWAYS, {
+ 'raspberries': FAIL,
+ 'strawberries': [PASS, ['mode == release', SLOW], ['mode == debug', NO_VARIANTS]],
+
+  # Both cherries and apples are there to test how PASS and FAIL from
+  # different sections are merged.
+ 'cherries': [PASS, SLOW],
+ 'apples': [FAIL],
+
+ # Unused rule.
+ 'carrots': [PASS, FAIL],
+}],
+
+['variant == nooptimization', {
+ 'strawberries': [SKIP],
+}],
+
+['arch == x64', {
+ 'cherries': [FAIL],
+ 'apples': [PASS, SLOW],
+
+ # Unused rule.
+ 'regress/*': [CRASH],
+}],
+
+['no_snap', {
+ 'bananas': [PASS, NO_VARIANTS],
+ 'raspberries': [FAIL, NO_VARIANTS],
+}],
+]
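+
+# For example, on x64 in release mode 'cherries' picks up [PASS, SLOW] from
+# the ALWAYS section and [FAIL] from the 'arch == x64' section; the runner
+# merges the outcomes from both sections.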
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
new file mode 100644
index 0000000000..115471ac72
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
@@ -0,0 +1,31 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy test suite extension with some fruity tests.
+"""
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+class TestSuite(testsuite.TestSuite):
+ def ListTests(self, context):
+ return map(
+ self._create_test,
+ ['bananas', 'apples', 'cherries', 'strawberries', 'raspberries'],
+ )
+
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def get_shell(self):
+ return 'd8_mocked.py'
+
+ def _get_files_params(self, ctx):
+ return [self.name]
+
+def GetSuite(name, root):
+ return TestSuite(name, root)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
new file mode 100644
index 0000000000..c5e3ee35f1
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -0,0 +1,18 @@
+{
+ "current_cpu": "x64",
+ "dcheck_always_on": false,
+ "is_asan": false,
+ "is_cfi": false,
+ "is_component_build": false,
+ "is_debug": false,
+ "is_gcov_coverage": false,
+ "is_ubsan_vptr": false,
+ "is_msan": false,
+ "is_tsan": false,
+ "target_cpu": "x64",
+ "v8_current_cpu": "x64",
+ "v8_enable_i18n_support": true,
+ "v8_enable_verify_predictable": false,
+ "v8_target_cpu": "x64",
+ "v8_use_snapshot": true
+}
diff --git a/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py b/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py
new file mode 100644
index 0000000000..e66e299bc6
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py
@@ -0,0 +1,29 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy d8 replacement for flaky tests.
+"""
+
+import os
+import sys
+
+PATH = os.path.dirname(os.path.abspath(__file__))
+
+print ' '.join(sys.argv[1:])
+
+# Test files ending in 'flakes' should first fail then pass. We store state in
+# a file side by side with the executable. No clean-up required as all tests
+# run in a temp test root. Restriction: Only one variant is supported for now.
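+# E.g. 'bananaflakes' exits 1 on its first invocation and writes the state
+# file; the rerun finds the file and exits 0, so the flake passes on retry.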
+for arg in sys.argv[1:]:
+ if arg.endswith('flakes'):
+ flake_state = os.path.join(PATH, arg)
+ if os.path.exists(flake_state):
+ sys.exit(0)
+ else:
+ with open(flake_state, 'w') as f:
+ f.write('something')
+ sys.exit(1)
+
+sys.exit(0)
diff --git a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status
new file mode 100644
index 0000000000..9ad8c81948
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status
@@ -0,0 +1,6 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
new file mode 100644
index 0000000000..9407769b35
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
@@ -0,0 +1,31 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy test suite extension with some flaky fruity tests.
+"""
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+class TestSuite(testsuite.TestSuite):
+ def ListTests(self, context):
+ return map(
+ self._create_test,
+ ['bananaflakes'],
+ )
+
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def get_shell(self):
+ return 'd8_mocked.py'
+
+ def _get_files_params(self, ctx):
+ return [self.name]
+
+def GetSuite(name, root):
+ return TestSuite(name, root)
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
new file mode 100644
index 0000000000..c5e3ee35f1
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -0,0 +1,18 @@
+{
+ "current_cpu": "x64",
+ "dcheck_always_on": false,
+ "is_asan": false,
+ "is_cfi": false,
+ "is_component_build": false,
+ "is_debug": false,
+ "is_gcov_coverage": false,
+ "is_ubsan_vptr": false,
+ "is_msan": false,
+ "is_tsan": false,
+ "target_cpu": "x64",
+ "v8_current_cpu": "x64",
+ "v8_enable_i18n_support": true,
+ "v8_enable_verify_predictable": false,
+ "v8_target_cpu": "x64",
+ "v8_use_snapshot": true
+}
diff --git a/deps/v8/tools/v8-rolls.sh b/deps/v8/tools/v8-rolls.sh
deleted file mode 100755
index 590e05c1f9..0000000000
--- a/deps/v8/tools/v8-rolls.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/bash
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-########## Global variable definitions
-
-DEPS_STRING='"v8_revision":'
-INFO=tools/v8-info.sh
-
-V8="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
-
-########## Function definitions
-
-usage() {
-cat << EOF
-usage: $0 OPTIONS
-
-Run in chromium/src to get information about V8 rolls.
-
-OPTIONS:
- -h Show this message.
- -n Number of rolls to print information about.
- -s Chromium git hash to start printing V8 information about.
-EOF
-}
-
-v8_line() {
- git show $1:DEPS | grep -n $DEPS_STRING | cut -d":" -f1
-}
-
-v8_info() {
- git blame -L$(v8_line $1),+1 $1 DEPS | grep $DEPS_STRING
-}
-
-v8_svn() {
- sed -e 's/^.*"\([0-9]\+\)",$/\1/'
-}
-
-v8_roll() {
- cut -d" " -f1
-}
-
-find_rev() {
- git svn find-rev $1
-}
-
-msg() {
- msg=$(git log --format="%h %ci %ce" -1 $1)
- h=$(echo $msg | cut -d" " -f1)
- d=$(echo $msg | cut -d" " -f2)
- t=$(echo $msg | cut -d" " -f3)
- a=$(echo $msg | cut -d" " -f5)
- a1=$(echo $a | cut -d"@" -f1)
- a2=$(echo $a | cut -d"@" -f2)
- echo $h $d $t $a1@$a2
-}
-
-v8_revision() {
- cd $V8
- $INFO -v $1
-}
-
-rolls() {
- roll=$2
- for i in $(seq 1 $1); do
- info=$(v8_info $roll)
- roll=$(echo $info | v8_roll $roll)
- trunk=$(echo $info | v8_svn $roll)
- echo "$(v8_revision $trunk) $trunk $(find_rev $roll) $(msg $roll)"
- roll=$roll^1
- done
-}
-
-########## Option parsing
-
-REVISIONS=1
-START=HEAD
-
-while getopts ":hn:s:" OPTION ; do
- case $OPTION in
- h) usage
- exit 0
- ;;
- n) REVISIONS=$OPTARG
- ;;
- s) START=$OPTARG
- ;;
- ?) echo "Illegal option: -$OPTARG"
- usage
- exit 1
- ;;
- esac
-done
-
-rolls $REVISIONS $START
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index d5765a6a04..c96741a9a1 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -51,113 +51,115 @@ INSTANCE_TYPES = {
147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
148: "FIXED_DOUBLE_ARRAY_TYPE",
149: "FILLER_TYPE",
- 150: "ACCESSOR_INFO_TYPE",
- 151: "ACCESSOR_PAIR_TYPE",
- 152: "ACCESS_CHECK_INFO_TYPE",
- 153: "INTERCEPTOR_INFO_TYPE",
- 154: "FUNCTION_TEMPLATE_INFO_TYPE",
- 155: "OBJECT_TEMPLATE_INFO_TYPE",
- 156: "ALLOCATION_SITE_TYPE",
- 157: "ALLOCATION_MEMENTO_TYPE",
- 158: "SCRIPT_TYPE",
- 159: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 160: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
- 161: "PROMISE_REACTION_JOB_INFO_TYPE",
- 162: "DEBUG_INFO_TYPE",
- 163: "STACK_FRAME_INFO_TYPE",
- 164: "PROTOTYPE_INFO_TYPE",
- 165: "TUPLE2_TYPE",
- 166: "TUPLE3_TYPE",
- 167: "CONTEXT_EXTENSION_TYPE",
- 168: "MODULE_TYPE",
- 169: "MODULE_INFO_ENTRY_TYPE",
- 170: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 150: "ACCESS_CHECK_INFO_TYPE",
+ 151: "ACCESSOR_INFO_TYPE",
+ 152: "ACCESSOR_PAIR_TYPE",
+ 153: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 154: "ALLOCATION_MEMENTO_TYPE",
+ 155: "ALLOCATION_SITE_TYPE",
+ 156: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 157: "CONTEXT_EXTENSION_TYPE",
+ 158: "DEBUG_INFO_TYPE",
+ 159: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 160: "INTERCEPTOR_INFO_TYPE",
+ 161: "MODULE_INFO_ENTRY_TYPE",
+ 162: "MODULE_TYPE",
+ 163: "OBJECT_TEMPLATE_INFO_TYPE",
+ 164: "PROMISE_REACTION_JOB_INFO_TYPE",
+ 165: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
+ 166: "PROTOTYPE_INFO_TYPE",
+ 167: "SCRIPT_TYPE",
+ 168: "STACK_FRAME_INFO_TYPE",
+ 169: "TUPLE2_TYPE",
+ 170: "TUPLE3_TYPE",
171: "FIXED_ARRAY_TYPE",
- 172: "HASH_TABLE_TYPE",
- 173: "DESCRIPTOR_ARRAY_TYPE",
+ 172: "DESCRIPTOR_ARRAY_TYPE",
+ 173: "HASH_TABLE_TYPE",
174: "TRANSITION_ARRAY_TYPE",
- 175: "FEEDBACK_VECTOR_TYPE",
- 176: "PROPERTY_ARRAY_TYPE",
- 177: "SHARED_FUNCTION_INFO_TYPE",
- 178: "CELL_TYPE",
- 179: "WEAK_CELL_TYPE",
+ 175: "CELL_TYPE",
+ 176: "CODE_DATA_CONTAINER_TYPE",
+ 177: "FEEDBACK_VECTOR_TYPE",
+ 178: "LOAD_HANDLER_TYPE",
+ 179: "PROPERTY_ARRAY_TYPE",
180: "PROPERTY_CELL_TYPE",
- 181: "SMALL_ORDERED_HASH_MAP_TYPE",
- 182: "SMALL_ORDERED_HASH_SET_TYPE",
- 183: "CODE_DATA_CONTAINER_TYPE",
- 184: "JS_PROXY_TYPE",
- 185: "JS_GLOBAL_OBJECT_TYPE",
- 186: "JS_GLOBAL_PROXY_TYPE",
- 187: "JS_MODULE_NAMESPACE_TYPE",
- 188: "JS_SPECIAL_API_OBJECT_TYPE",
- 189: "JS_VALUE_TYPE",
- 190: "JS_MESSAGE_OBJECT_TYPE",
- 191: "JS_DATE_TYPE",
- 192: "JS_API_OBJECT_TYPE",
- 193: "JS_OBJECT_TYPE",
- 194: "JS_ARGUMENTS_TYPE",
- 195: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 196: "JS_GENERATOR_OBJECT_TYPE",
- 197: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 198: "JS_ARRAY_TYPE",
- 199: "JS_ARRAY_BUFFER_TYPE",
- 200: "JS_TYPED_ARRAY_TYPE",
- 201: "JS_DATA_VIEW_TYPE",
- 202: "JS_SET_TYPE",
- 203: "JS_MAP_TYPE",
- 204: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 205: "JS_SET_VALUE_ITERATOR_TYPE",
- 206: "JS_MAP_KEY_ITERATOR_TYPE",
- 207: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 208: "JS_MAP_VALUE_ITERATOR_TYPE",
- 209: "JS_WEAK_MAP_TYPE",
- 210: "JS_WEAK_SET_TYPE",
- 211: "JS_PROMISE_TYPE",
- 212: "JS_REGEXP_TYPE",
- 213: "JS_ERROR_TYPE",
- 214: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 215: "JS_STRING_ITERATOR_TYPE",
- 216: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
- 217: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
- 218: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
- 219: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 220: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 221: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 222: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 223: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 224: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 225: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 226: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 227: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 228: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 229: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 230: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 231: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 232: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 233: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 234: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 235: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
- 236: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
- 237: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
- 238: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
- 239: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
- 240: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
- 241: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
- 242: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
- 243: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
- 244: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 245: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 246: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
- 247: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
- 248: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 249: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 250: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
- 251: "WASM_INSTANCE_TYPE",
- 252: "WASM_MEMORY_TYPE",
- 253: "WASM_MODULE_TYPE",
- 254: "WASM_TABLE_TYPE",
- 255: "JS_BOUND_FUNCTION_TYPE",
- 256: "JS_FUNCTION_TYPE",
+ 181: "SHARED_FUNCTION_INFO_TYPE",
+ 182: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 183: "SMALL_ORDERED_HASH_SET_TYPE",
+ 184: "STORE_HANDLER_TYPE",
+ 185: "WEAK_CELL_TYPE",
+ 1024: "JS_PROXY_TYPE",
+ 1025: "JS_GLOBAL_OBJECT_TYPE",
+ 1026: "JS_GLOBAL_PROXY_TYPE",
+ 1027: "JS_MODULE_NAMESPACE_TYPE",
+ 1040: "JS_SPECIAL_API_OBJECT_TYPE",
+ 1041: "JS_VALUE_TYPE",
+ 1056: "JS_API_OBJECT_TYPE",
+ 1057: "JS_OBJECT_TYPE",
+ 1058: "JS_ARGUMENTS_TYPE",
+ 1059: "JS_ARRAY_BUFFER_TYPE",
+ 1060: "JS_ARRAY_TYPE",
+ 1061: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 1062: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 1063: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 1064: "JS_DATE_TYPE",
+ 1065: "JS_ERROR_TYPE",
+ 1066: "JS_GENERATOR_OBJECT_TYPE",
+ 1067: "JS_MAP_TYPE",
+ 1068: "JS_MAP_KEY_ITERATOR_TYPE",
+ 1069: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 1070: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 1071: "JS_MESSAGE_OBJECT_TYPE",
+ 1072: "JS_PROMISE_TYPE",
+ 1073: "JS_REGEXP_TYPE",
+ 1074: "JS_SET_TYPE",
+ 1075: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 1076: "JS_SET_VALUE_ITERATOR_TYPE",
+ 1077: "JS_STRING_ITERATOR_TYPE",
+ 1078: "JS_WEAK_MAP_TYPE",
+ 1079: "JS_WEAK_SET_TYPE",
+ 1080: "JS_TYPED_ARRAY_TYPE",
+ 1081: "JS_DATA_VIEW_TYPE",
+ 1082: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
+ 1083: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
+ 1084: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
+ 1085: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1086: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1087: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1088: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1089: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1090: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1091: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1092: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1093: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1094: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1095: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1096: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1097: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1098: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1099: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1100: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1101: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 1102: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 1103: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 1104: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 1105: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1106: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1107: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1108: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 1109: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
+ 1110: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 1111: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 1112: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
+ 1113: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
+ 1114: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 1115: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 1116: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
+ 1117: "WASM_INSTANCE_TYPE",
+ 1118: "WASM_MEMORY_TYPE",
+ 1119: "WASM_MODULE_TYPE",
+ 1120: "WASM_TABLE_TYPE",
+ 1121: "JS_BOUND_FUNCTION_TYPE",
+ 1122: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
@@ -165,7 +167,7 @@ KNOWN_MAPS = {
0x02201: (138, "FreeSpaceMap"),
0x02251: (132, "MetaMap"),
0x022a1: (131, "NullMap"),
- 0x022f1: (173, "DescriptorArrayMap"),
+ 0x022f1: (172, "DescriptorArrayMap"),
0x02341: (171, "FixedArrayMap"),
0x02391: (149, "OnePointerFillerMap"),
0x023e1: (149, "TwoPointerFillerMap"),
@@ -177,19 +179,19 @@ KNOWN_MAPS = {
0x025c1: (131, "BooleanMap"),
0x02611: (136, "ByteArrayMap"),
0x02661: (171, "FixedCOWArrayMap"),
- 0x026b1: (172, "HashTableMap"),
+ 0x026b1: (173, "HashTableMap"),
0x02701: (128, "SymbolMap"),
0x02751: (72, "OneByteStringMap"),
0x027a1: (171, "ScopeInfoMap"),
- 0x027f1: (177, "SharedFunctionInfoMap"),
+ 0x027f1: (181, "SharedFunctionInfoMap"),
0x02841: (133, "CodeMap"),
0x02891: (171, "FunctionContextMap"),
- 0x028e1: (178, "CellMap"),
- 0x02931: (179, "WeakCellMap"),
+ 0x028e1: (175, "CellMap"),
+ 0x02931: (185, "WeakCellMap"),
0x02981: (180, "GlobalPropertyCellMap"),
0x029d1: (135, "ForeignMap"),
0x02a21: (174, "TransitionArrayMap"),
- 0x02a71: (175, "FeedbackVectorMap"),
+ 0x02a71: (177, "FeedbackVectorMap"),
0x02ac1: (131, "ArgumentsMarkerMap"),
0x02b11: (131, "ExceptionMap"),
0x02b61: (131, "TerminationExceptionMap"),
@@ -204,79 +206,80 @@ KNOWN_MAPS = {
0x02e31: (171, "WithContextMap"),
0x02e81: (171, "DebugEvaluateContextMap"),
0x02ed1: (171, "ScriptContextTableMap"),
- 0x02f21: (148, "FixedDoubleArrayMap"),
- 0x02f71: (134, "MutableHeapNumberMap"),
- 0x02fc1: (172, "OrderedHashMapMap"),
- 0x03011: (172, "OrderedHashSetMap"),
- 0x03061: (172, "NameDictionaryMap"),
- 0x030b1: (172, "GlobalDictionaryMap"),
- 0x03101: (172, "NumberDictionaryMap"),
- 0x03151: (172, "StringTableMap"),
- 0x031a1: (172, "WeakHashTableMap"),
- 0x031f1: (171, "SloppyArgumentsElementsMap"),
- 0x03241: (181, "SmallOrderedHashMapMap"),
- 0x03291: (182, "SmallOrderedHashSetMap"),
- 0x032e1: (183, "CodeDataContainerMap"),
- 0x03331: (190, "JSMessageObjectMap"),
- 0x03381: (193, "ExternalMap"),
- 0x033d1: (137, "BytecodeArrayMap"),
- 0x03421: (171, "ModuleInfoMap"),
- 0x03471: (178, "NoClosuresCellMap"),
- 0x034c1: (178, "OneClosureCellMap"),
- 0x03511: (178, "ManyClosuresCellMap"),
- 0x03561: (176, "PropertyArrayMap"),
- 0x035b1: (130, "BigIntMap"),
- 0x03601: (106, "NativeSourceStringMap"),
- 0x03651: (64, "StringMap"),
- 0x036a1: (73, "ConsOneByteStringMap"),
- 0x036f1: (65, "ConsStringMap"),
- 0x03741: (77, "ThinOneByteStringMap"),
- 0x03791: (69, "ThinStringMap"),
- 0x037e1: (67, "SlicedStringMap"),
- 0x03831: (75, "SlicedOneByteStringMap"),
- 0x03881: (66, "ExternalStringMap"),
- 0x038d1: (82, "ExternalStringWithOneByteDataMap"),
- 0x03921: (74, "ExternalOneByteStringMap"),
- 0x03971: (98, "ShortExternalStringMap"),
- 0x039c1: (114, "ShortExternalStringWithOneByteDataMap"),
- 0x03a11: (0, "InternalizedStringMap"),
- 0x03a61: (2, "ExternalInternalizedStringMap"),
- 0x03ab1: (18, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x03b01: (10, "ExternalOneByteInternalizedStringMap"),
- 0x03b51: (34, "ShortExternalInternalizedStringMap"),
- 0x03ba1: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x03bf1: (42, "ShortExternalOneByteInternalizedStringMap"),
- 0x03c41: (106, "ShortExternalOneByteStringMap"),
- 0x03c91: (140, "FixedUint8ArrayMap"),
- 0x03ce1: (139, "FixedInt8ArrayMap"),
- 0x03d31: (142, "FixedUint16ArrayMap"),
- 0x03d81: (141, "FixedInt16ArrayMap"),
- 0x03dd1: (144, "FixedUint32ArrayMap"),
- 0x03e21: (143, "FixedInt32ArrayMap"),
- 0x03e71: (145, "FixedFloat32ArrayMap"),
- 0x03ec1: (146, "FixedFloat64ArrayMap"),
- 0x03f11: (147, "FixedUint8ClampedArrayMap"),
- 0x03f61: (165, "Tuple2Map"),
- 0x03fb1: (158, "ScriptMap"),
- 0x04001: (153, "InterceptorInfoMap"),
- 0x04051: (150, "AccessorInfoMap"),
- 0x040a1: (151, "AccessorPairMap"),
- 0x040f1: (152, "AccessCheckInfoMap"),
- 0x04141: (154, "FunctionTemplateInfoMap"),
- 0x04191: (155, "ObjectTemplateInfoMap"),
- 0x041e1: (156, "AllocationSiteMap"),
- 0x04231: (157, "AllocationMementoMap"),
- 0x04281: (159, "AliasedArgumentsEntryMap"),
- 0x042d1: (160, "PromiseResolveThenableJobInfoMap"),
- 0x04321: (161, "PromiseReactionJobInfoMap"),
- 0x04371: (162, "DebugInfoMap"),
- 0x043c1: (163, "StackFrameInfoMap"),
- 0x04411: (164, "PrototypeInfoMap"),
- 0x04461: (166, "Tuple3Map"),
- 0x044b1: (167, "ContextExtensionMap"),
- 0x04501: (168, "ModuleMap"),
- 0x04551: (169, "ModuleInfoEntryMap"),
- 0x045a1: (170, "AsyncGeneratorRequestMap"),
+ 0x02f21: (171, "ArrayListMap"),
+ 0x02f71: (148, "FixedDoubleArrayMap"),
+ 0x02fc1: (134, "MutableHeapNumberMap"),
+ 0x03011: (173, "OrderedHashMapMap"),
+ 0x03061: (173, "OrderedHashSetMap"),
+ 0x030b1: (173, "NameDictionaryMap"),
+ 0x03101: (173, "GlobalDictionaryMap"),
+ 0x03151: (173, "NumberDictionaryMap"),
+ 0x031a1: (173, "StringTableMap"),
+ 0x031f1: (173, "WeakHashTableMap"),
+ 0x03241: (171, "SloppyArgumentsElementsMap"),
+ 0x03291: (182, "SmallOrderedHashMapMap"),
+ 0x032e1: (183, "SmallOrderedHashSetMap"),
+ 0x03331: (176, "CodeDataContainerMap"),
+ 0x03381: (1071, "JSMessageObjectMap"),
+ 0x033d1: (1057, "ExternalMap"),
+ 0x03421: (137, "BytecodeArrayMap"),
+ 0x03471: (171, "ModuleInfoMap"),
+ 0x034c1: (175, "NoClosuresCellMap"),
+ 0x03511: (175, "OneClosureCellMap"),
+ 0x03561: (175, "ManyClosuresCellMap"),
+ 0x035b1: (179, "PropertyArrayMap"),
+ 0x03601: (130, "BigIntMap"),
+ 0x03651: (106, "NativeSourceStringMap"),
+ 0x036a1: (64, "StringMap"),
+ 0x036f1: (73, "ConsOneByteStringMap"),
+ 0x03741: (65, "ConsStringMap"),
+ 0x03791: (77, "ThinOneByteStringMap"),
+ 0x037e1: (69, "ThinStringMap"),
+ 0x03831: (67, "SlicedStringMap"),
+ 0x03881: (75, "SlicedOneByteStringMap"),
+ 0x038d1: (66, "ExternalStringMap"),
+ 0x03921: (82, "ExternalStringWithOneByteDataMap"),
+ 0x03971: (74, "ExternalOneByteStringMap"),
+ 0x039c1: (98, "ShortExternalStringMap"),
+ 0x03a11: (114, "ShortExternalStringWithOneByteDataMap"),
+ 0x03a61: (0, "InternalizedStringMap"),
+ 0x03ab1: (2, "ExternalInternalizedStringMap"),
+ 0x03b01: (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x03b51: (10, "ExternalOneByteInternalizedStringMap"),
+ 0x03ba1: (34, "ShortExternalInternalizedStringMap"),
+ 0x03bf1: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x03c41: (42, "ShortExternalOneByteInternalizedStringMap"),
+ 0x03c91: (106, "ShortExternalOneByteStringMap"),
+ 0x03ce1: (140, "FixedUint8ArrayMap"),
+ 0x03d31: (139, "FixedInt8ArrayMap"),
+ 0x03d81: (142, "FixedUint16ArrayMap"),
+ 0x03dd1: (141, "FixedInt16ArrayMap"),
+ 0x03e21: (144, "FixedUint32ArrayMap"),
+ 0x03e71: (143, "FixedInt32ArrayMap"),
+ 0x03ec1: (145, "FixedFloat32ArrayMap"),
+ 0x03f11: (146, "FixedFloat64ArrayMap"),
+ 0x03f61: (147, "FixedUint8ClampedArrayMap"),
+ 0x03fb1: (169, "Tuple2Map"),
+ 0x04001: (167, "ScriptMap"),
+ 0x04051: (160, "InterceptorInfoMap"),
+ 0x040a1: (151, "AccessorInfoMap"),
+ 0x040f1: (150, "AccessCheckInfoMap"),
+ 0x04141: (152, "AccessorPairMap"),
+ 0x04191: (153, "AliasedArgumentsEntryMap"),
+ 0x041e1: (154, "AllocationMementoMap"),
+ 0x04231: (155, "AllocationSiteMap"),
+ 0x04281: (156, "AsyncGeneratorRequestMap"),
+ 0x042d1: (157, "ContextExtensionMap"),
+ 0x04321: (158, "DebugInfoMap"),
+ 0x04371: (159, "FunctionTemplateInfoMap"),
+ 0x043c1: (161, "ModuleInfoEntryMap"),
+ 0x04411: (162, "ModuleMap"),
+ 0x04461: (163, "ObjectTemplateInfoMap"),
+ 0x044b1: (164, "PromiseReactionJobInfoMap"),
+ 0x04501: (165, "PromiseResolveThenableJobInfoMap"),
+ 0x04551: (166, "PrototypeInfoMap"),
+ 0x045a1: (168, "StackFrameInfoMap"),
+ 0x045f1: (170, "Tuple3Map"),
}
# List of known V8 objects.
@@ -349,6 +352,7 @@ FRAME_MARKERS = (
"ARGUMENTS_ADAPTOR",
"BUILTIN",
"BUILTIN_EXIT",
+ "NATIVE",
)
# This set of constants is generated from a shipping build.
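
Aside (illustrative, not part of the patch): v8heapconst.py is generated
output, and postmortem tools chain its two tables -- KNOWN_MAPS resolves a
map's offset on the first heap page to an (instance type, name) pair, and
INSTANCE_TYPES resolves the numeric instance type to its name. A minimal
sketch using entries excerpted from the tables above; describe_map() is a
hypothetical helper, not part of the generated file:

    # Excerpts of the generated tables above, not the full dicts.
    INSTANCE_TYPES = {
        1057: "JS_OBJECT_TYPE",
        1122: "JS_FUNCTION_TYPE",
    }
    KNOWN_MAPS = {
        0x033d1: (1057, "ExternalMap"),
    }

    def describe_map(map_offset):
        # Label a map found at a known offset: look up the map name, then
        # translate its numeric instance type into a readable name.
        instance_type, name = KNOWN_MAPS.get(map_offset, (None, "<unknown map>"))
        type_name = INSTANCE_TYPES.get(instance_type, "<unknown type>")
        return "%s (instance type %s)" % (name, type_name)

    print(describe_map(0x033d1))  # ExternalMap (instance type JS_OBJECT_TYPE)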
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index ffdef0d820..c4d18a3333 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -3,15 +3,27 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# Exit immediately if a command exits with a non-zero status.
set -e
+# Treat unset variables as an error when substituting.
+set -u
+
+# The return value of a pipeline is the status of the last command to exit
+# with a non-zero status, or zero if no command exited with a non-zero status.
+set -o pipefail
+
TOOLS_WASM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
V8_DIR="${TOOLS_WASM_DIR}/../.."
+SPEC_TEST_DIR="${V8_DIR}/test/wasm-spec-tests"
cd ${V8_DIR}
-mkdir -p ./test/wasm-spec-tests/tests/
-rm -rf ./test/wasm-spec-tests/tests/*
+rm -rf "${SPEC_TEST_DIR}/tests"
+mkdir "${SPEC_TEST_DIR}/tests"
+
+rm -rf "${SPEC_TEST_DIR}/tmp"
+mkdir "${SPEC_TEST_DIR}/tmp"
./tools/dev/gm.py x64.release d8
@@ -20,11 +32,14 @@ make clean all
cd ${V8_DIR}/test/wasm-js/test/core
-./run.py --wasm ${V8_DIR}/test/wasm-js/interpreter/wasm --js ${V8_DIR}/out/x64.release/d8
-cp ${V8_DIR}/test/wasm-js/test/core/output/*.js ${V8_DIR}/test/wasm-spec-tests/tests
+./run.py --wasm "${V8_DIR}/test/wasm-js/interpreter/wasm" --js "${V8_DIR}/out/x64.release/d8" --out "${SPEC_TEST_DIR}/tmp"
+cp "${SPEC_TEST_DIR}"/tmp/*.js "${SPEC_TEST_DIR}/tests/"
+rm -rf "${SPEC_TEST_DIR}/tmp"
-cd ${V8_DIR}/test/wasm-spec-tests
+cd "${SPEC_TEST_DIR}"
+echo
+echo "The following files will get uploaded:"
+ls tests
+echo
upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
-
-
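
Aside (illustrative, not part of the patch): the effect of the newly added
"set -o pipefail" is easy to miss -- without it, a failure early in a
pipeline is masked when a later command in the pipeline succeeds. A small
demonstration in Python (it assumes a bash on PATH; the helper below is
hypothetical and only spawns bash to show the exit status):

    import subprocess

    def pipeline_status(prelude):
        # Run a pipeline whose first command fails, then echo bash's $?.
        script = prelude + "false | true; echo $?"
        result = subprocess.run(["bash", "-c", script],
                                capture_output=True, text=True)
        return result.stdout.strip()

    print(pipeline_status(""))                   # "0" -- the failure is masked
    print(pipeline_status("set -o pipefail; "))  # "1" -- the failure surfaces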
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index a39b5f1e45..ed5e51f96a 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -5,7 +5,8 @@ Try to write something funny. And please don't add trailing whitespace.
A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up......
-The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles...............
+The Smi looked at them when a crazy v8-autoroll account showed up...
+The autoroller bought a round of Himbeerbrause. Suddenly...
+The bartender starts to shake the bottles.......................
+.
.